2024-12-02 14:08:10,921 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-02 14:08:10,932 main DEBUG Took 0.009341 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-02 14:08:10,932 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-02 14:08:10,932 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-02 14:08:10,933 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-02 14:08:10,935 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,943 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-02 14:08:10,957 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,959 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,960 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,960 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,961 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,961 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,962 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,963 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,963 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,963 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,965 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,965 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,966 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,966 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,966 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,967 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,967 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,968 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,968 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,969 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,969 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,970 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,970 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,971 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-02 14:08:10,971 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,972 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-02 14:08:10,973 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-02 14:08:10,975 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-02 14:08:10,977 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-02 14:08:10,977 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-02 14:08:10,979 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-02 14:08:10,979 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-02 14:08:10,988 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-02 14:08:10,990 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-02 14:08:10,992 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-02 14:08:10,993 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-02 14:08:10,993 main DEBUG createAppenders(={Console})
2024-12-02 14:08:10,994 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-12-02 14:08:10,994 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-02 14:08:10,995 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-12-02 14:08:10,995 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-02 14:08:10,996 main DEBUG OutputStream closed
2024-12-02 14:08:10,996 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-02 14:08:10,996 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-02 14:08:10,997 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-12-02 14:08:11,077 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-02 14:08:11,080 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-02 14:08:11,081 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-02 14:08:11,082 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-02 14:08:11,083 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-02 14:08:11,083 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-02 14:08:11,084 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-02 14:08:11,084 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-02 14:08:11,084 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-02 14:08:11,085 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-02 14:08:11,085 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-02 14:08:11,086 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-02 14:08:11,086 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-02 14:08:11,086 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-02 14:08:11,086 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-02 14:08:11,087 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-02 14:08:11,087 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-02 14:08:11,088 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-02 14:08:11,090 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-02 14:08:11,090 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-02 14:08:11,091 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-02 14:08:11,091 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-02T14:08:11,299 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1
2024-12-02 14:08:11,302 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-02 14:08:11,302 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-02T14:08:11,310 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-02T14:08:11,338 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=140, ProcessCount=11, AvailableMemoryMB=7304
2024-12-02T14:08:11,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-02T14:08:11,354 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376, deleteOnExit=true
2024-12-02T14:08:11,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-02T14:08:11,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/test.cache.data in system properties and HBase conf
2024-12-02T14:08:11,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.tmp.dir in system properties and HBase conf
2024-12-02T14:08:11,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir in system properties and HBase conf
2024-12-02T14:08:11,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-02T14:08:11,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-02T14:08:11,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-02T14:08:11,450 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-02T14:08:11,546 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-02T14:08:11,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-02T14:08:11,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-02T14:08:11,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-02T14:08:11,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-02T14:08:11,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-02T14:08:11,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-02T14:08:11,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-02T14:08:11,551 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-02T14:08:11,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-02T14:08:11,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/nfs.dump.dir in system properties and HBase conf
2024-12-02T14:08:11,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/java.io.tmpdir in system properties and HBase conf
2024-12-02T14:08:11,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-02T14:08:11,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-02T14:08:11,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-02T14:08:11,943 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-02T14:08:12,213 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-02T14:08:12,283 INFO [Time-limited test {}] log.Log(170): Logging initialized @1945ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-02T14:08:12,349 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-02T14:08:12,405 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-02T14:08:12,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-02T14:08:12,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-02T14:08:12,424 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-02T14:08:12,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-02T14:08:12,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir/,AVAILABLE}
2024-12-02T14:08:12,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-02T14:08:12,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/java.io.tmpdir/jetty-localhost-43483-hadoop-hdfs-3_4_1-tests_jar-_-any-8774066977428887704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-02T14:08:12,601 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:43483}
2024-12-02T14:08:12,602 INFO [Time-limited test {}] server.Server(415): Started @2264ms
2024-12-02T14:08:12,627 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-02T14:08:12,899 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-02T14:08:12,906 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-02T14:08:12,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-02T14:08:12,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-02T14:08:12,908 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-02T14:08:12,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir/,AVAILABLE}
2024-12-02T14:08:12,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-02T14:08:13,011 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/java.io.tmpdir/jetty-localhost-34849-hadoop-hdfs-3_4_1-tests_jar-_-any-9069705744918792288/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T14:08:13,012 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:34849}
2024-12-02T14:08:13,012 INFO [Time-limited test {}] server.Server(415): Started @2674ms
2024-12-02T14:08:13,058 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-02T14:08:13,156 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-02T14:08:13,163 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-02T14:08:13,165 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-02T14:08:13,166 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-02T14:08:13,166 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-02T14:08:13,167 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir/,AVAILABLE}
2024-12-02T14:08:13,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-02T14:08:13,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/java.io.tmpdir/jetty-localhost-40159-hadoop-hdfs-3_4_1-tests_jar-_-any-358628431755566108/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T14:08:13,271 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:40159}
2024-12-02T14:08:13,271 INFO [Time-limited test {}] server.Server(415): Started @2934ms
2024-12-02T14:08:13,273 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-02T14:08:13,405 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data4/current/BP-207308689-172.17.0.2-1733148492023/current, will proceed with Du for space computation calculation,
2024-12-02T14:08:13,405 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data1/current/BP-207308689-172.17.0.2-1733148492023/current, will proceed with Du for space computation calculation,
2024-12-02T14:08:13,405 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data2/current/BP-207308689-172.17.0.2-1733148492023/current, will proceed with Du for space computation calculation,
2024-12-02T14:08:13,405 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data3/current/BP-207308689-172.17.0.2-1733148492023/current, will proceed with Du for space computation calculation,
2024-12-02T14:08:13,455 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-02T14:08:13,455 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-02T14:08:13,512 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8f766994e1fde30 with lease ID 0x3eef4d14873ee6fe: Processing first storage report for DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132 from datanode DatanodeRegistration(127.0.0.1:37629, datanodeUuid=dbf527d8-df80-4746-bc2c-10385216274e, infoPort=41385, infoSecurePort=0, ipcPort=39957, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023)
2024-12-02T14:08:13,514 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8f766994e1fde30 with lease ID 0x3eef4d14873ee6fe: from storage DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132 node DatanodeRegistration(127.0.0.1:37629, datanodeUuid=dbf527d8-df80-4746-bc2c-10385216274e, infoPort=41385, infoSecurePort=0, ipcPort=39957, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-02T14:08:13,514 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x622fb172cf7b7938 with lease ID 0x3eef4d14873ee6fd: Processing first storage report for DS-f312db39-1fda-474d-bb02-ca866b5f1911 from datanode DatanodeRegistration(127.0.0.1:45893, datanodeUuid=0120331a-e668-4d65-b2c8-9a2f2cc54cb9, infoPort=38457, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023)
2024-12-02T14:08:13,514 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x622fb172cf7b7938 with lease ID 0x3eef4d14873ee6fd: from storage DS-f312db39-1fda-474d-bb02-ca866b5f1911 node DatanodeRegistration(127.0.0.1:45893, datanodeUuid=0120331a-e668-4d65-b2c8-9a2f2cc54cb9, infoPort=38457, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-02T14:08:13,515 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa8f766994e1fde30 with lease ID 0x3eef4d14873ee6fe: Processing first storage report for DS-23e9dd9c-4ca8-4cb0-be2f-9a0f1523040c from datanode DatanodeRegistration(127.0.0.1:37629, datanodeUuid=dbf527d8-df80-4746-bc2c-10385216274e, infoPort=41385, infoSecurePort=0, ipcPort=39957, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023)
2024-12-02T14:08:13,515 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa8f766994e1fde30 with lease ID 0x3eef4d14873ee6fe: from storage DS-23e9dd9c-4ca8-4cb0-be2f-9a0f1523040c node DatanodeRegistration(127.0.0.1:37629, datanodeUuid=dbf527d8-df80-4746-bc2c-10385216274e, infoPort=41385, infoSecurePort=0, ipcPort=39957, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-02T14:08:13,515 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x622fb172cf7b7938 with lease ID 0x3eef4d14873ee6fd: Processing first storage report for DS-f34a283d-60d5-44fa-8850-d35e50d14dc8 from datanode DatanodeRegistration(127.0.0.1:45893, datanodeUuid=0120331a-e668-4d65-b2c8-9a2f2cc54cb9, infoPort=38457, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023)
2024-12-02T14:08:13,516 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x622fb172cf7b7938 with lease ID 0x3eef4d14873ee6fd: from storage DS-f34a283d-60d5-44fa-8850-d35e50d14dc8 node DatanodeRegistration(127.0.0.1:45893, datanodeUuid=0120331a-e668-4d65-b2c8-9a2f2cc54cb9, infoPort=38457, infoSecurePort=0, ipcPort=38397, storageInfo=lv=-57;cid=testClusterID;nsid=1298534526;c=1733148492023), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-02T14:08:13,583 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1
2024-12-02T14:08:13,667 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/zookeeper_0, clientPort=52500, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-02T14:08:13,676 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52500
2024-12-02T14:08:13,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T14:08:13,689 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T14:08:13,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741825_1001 (size=7)
2024-12-02T14:08:13,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741825_1001 (size=7)
2024-12-02T14:08:14,310 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1 with version=8
2024-12-02T14:08:14,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase-staging
2024-12-02T14:08:14,379 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-02T14:08:14,562 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a3a61c9ba14f:0 server-side Connection retries=45
2024-12-02T14:08:14,570 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-02T14:08:14,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-02T14:08:14,574 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-02T14:08:14,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-02T14:08:14,575 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-02T14:08:14,679 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-02T14:08:14,727 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-02T14:08:14,734 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-02T14:08:14,737 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-02T14:08:14,757 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 66907 (auto-detected)
2024-12-02T14:08:14,758 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-02T14:08:14,773 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45661
2024-12-02T14:08:14,789 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45661 connecting to ZooKeeper ensemble=127.0.0.1:52500
2024-12-02T14:08:14,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:456610x0, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-02T14:08:14,815 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45661-0x1009b42a3c00000 connected
2024-12-02T14:08:14,842 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T14:08:14,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T14:08:14,857 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-02T14:08:14,861 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1, hbase.cluster.distributed=false
2024-12-02T14:08:14,880 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-02T14:08:14,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45661
2024-12-02T14:08:14,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45661
2024-12-02T14:08:14,885 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45661
2024-12-02T14:08:14,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45661
2024-12-02T14:08:14,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45661
2024-12-02T14:08:14,981 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45
2024-12-02T14:08:14,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-02T14:08:14,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-02T14:08:14,983 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-02T14:08:14,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-02T14:08:14,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-02T14:08:14,986 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-02T14:08:14,988 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-02T14:08:14,989 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39167
2024-12-02T14:08:14,991 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39167 connecting to ZooKeeper ensemble=127.0.0.1:52500
2024-12-02T14:08:14,992 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T14:08:14,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T14:08:15,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:391670x0, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-02T14:08:15,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:391670x0, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-02T14:08:15,002 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39167-0x1009b42a3c00001 connected
2024-12-02T14:08:15,006 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-02T14:08:15,014 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-02T14:08:15,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-02T14:08:15,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-02T14:08:15,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39167
2024-12-02T14:08:15,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39167
2024-12-02T14:08:15,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39167
2024-12-02T14:08:15,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39167
2024-12-02T14:08:15,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39167
2024-12-02T14:08:15,038 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a3a61c9ba14f:45661
2024-12-02T14:08:15,039 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a3a61c9ba14f,45661,1733148494419
2024-12-02T14:08:15,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-02T14:08:15,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-02T14:08:15,047 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a3a61c9ba14f,45661,1733148494419
2024-12-02T14:08:15,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T14:08:15,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-02T14:08:15,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T14:08:15,074 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-02T14:08:15,074 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a3a61c9ba14f,45661,1733148494419 from backup master directory
2024-12-02T14:08:15,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a3a61c9ba14f,45661,1733148494419
2024-12-02T14:08:15,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-02T14:08:15,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-02T14:08:15,078 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-02T14:08:15,078 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a3a61c9ba14f,45661,1733148494419
2024-12-02T14:08:15,080 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-02T14:08:15,081 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-02T14:08:15,128 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase.id] with ID: a5d27c7c-38eb-49ed-8581-4293b4d513f3
2024-12-02T14:08:15,128 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/.tmp/hbase.id
2024-12-02T14:08:15,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741826_1002 (size=42)
2024-12-02T14:08:15,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741826_1002 (size=42)
2024-12-02T14:08:15,141 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/.tmp/hbase.id]:[hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase.id]
2024-12-02T14:08:15,183 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-02T14:08:15,187 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-02T14:08:15,203 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 14ms.
2024-12-02T14:08:15,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T14:08:15,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T14:08:15,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741827_1003 (size=196)
2024-12-02T14:08:15,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741827_1003 (size=196)
2024-12-02T14:08:15,237 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-02T14:08:15,238 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-02T14:08:15,243 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-02T14:08:15,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741828_1004 (size=1189)
2024-12-02T14:08:15,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741828_1004 (size=1189)
2024-12-02T14:08:15,286 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store
2024-12-02T14:08:15,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741829_1005 (size=34)
2024-12-02T14:08:15,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741829_1005 (size=34)
2024-12-02T14:08:15,309 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-02T14:08:15,312 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-02T14:08:15,313 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-02T14:08:15,314 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T14:08:15,314 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T14:08:15,315 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-02T14:08:15,316 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T14:08:15,316 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T14:08:15,317 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148495313Disabling compacts and flushes for region at 1733148495313Disabling writes for close at 1733148495315 (+2 ms)Writing region close event to WAL at 1733148495316 (+1 ms)Closed at 1733148495316
2024-12-02T14:08:15,319 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/.initializing
2024-12-02T14:08:15,319 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/WALs/a3a61c9ba14f,45661,1733148494419
2024-12-02T14:08:15,338 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C45661%2C1733148494419, suffix=, logDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/WALs/a3a61c9ba14f,45661,1733148494419, archiveDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/oldWALs, maxLogs=10
2024-12-02T14:08:15,347 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C45661%2C1733148494419.1733148495343
2024-12-02T14:08:15,365 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/WALs/a3a61c9ba14f,45661,1733148494419/a3a61c9ba14f%2C45661%2C1733148494419.1733148495343
2024-12-02T14:08:15,375 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38457:38457),(127.0.0.1/127.0.0.1:41385:41385)]
2024-12-02T14:08:15,376 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-02T14:08:15,376 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-02T14:08:15,379 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-02T14:08:15,381 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-02T14:08:15,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-02T14:08:15,440 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-02T14:08:15,443 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T14:08:15,446 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-02T14:08:15,446 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-02T14:08:15,449 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-02T14:08:15,450 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-02T14:08:15,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-02T14:08:15,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-02T14:08:15,453 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction:
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:08:15,453 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:15,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:08:15,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:08:15,457 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:08:15,457 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:15,458 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:08:15,458 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:08:15,461 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:08:15,462 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:08:15,467 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:08:15,468 DEBUG 
[master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:08:15,471 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:08:15,474 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:08:15,478 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:08:15,480 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=792421, jitterRate=0.007616370916366577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:08:15,488 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733148495393Initializing all the Stores at 1733148495395 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148495395Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148495396 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148495396Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148495396Cleaning up temporary data from old regions at 1733148495468 (+72 ms)Region opened successfully at 1733148495488 (+20 ms) 2024-12-02T14:08:15,490 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:08:15,523 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c08cebe, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:08:15,548 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T14:08:15,556 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:08:15,557 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:08:15,559 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:08:15,561 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T14:08:15,565 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-02T14:08:15,565 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:08:15,587 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:08:15,594 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:08:15,595 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:08:15,598 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:08:15,599 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:08:15,601 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:08:15,603 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:08:15,606 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:08:15,607 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:08:15,609 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-12-02T14:08:15,610 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:08:15,623 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:08:15,624 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:08:15,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:08:15,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:08:15,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:15,627 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:15,629 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a3a61c9ba14f,45661,1733148494419, sessionid=0x1009b42a3c00000, setting cluster-up flag (Was=false) 2024-12-02T14:08:15,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:15,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:15,641 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:08:15,643 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,45661,1733148494419 2024-12-02T14:08:15,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:15,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:15,652 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T14:08:15,654 DEBUG 
[master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,45661,1733148494419 2024-12-02T14:08:15,660 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:08:15,721 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:08:15,729 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(746): ClusterId : a5d27c7c-38eb-49ed-8581-4293b4d513f3 2024-12-02T14:08:15,730 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:08:15,731 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:08:15,735 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:08:15,735 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:08:15,736 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
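The StochasticLoadBalancer line above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. Assuming the standard property names for this balancer (an assumption worth verifying against the running version), those knobs can be set in a Configuration roughly as follows; BalancerTuningSketch is a placeholder class name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values below are the ones reported in the log line above.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
      }
    }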
2024-12-02T14:08:15,738 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:08:15,738 DEBUG [RS:0;a3a61c9ba14f:39167 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b32057, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:08:15,741 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a3a61c9ba14f,45661,1733148494419 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:08:15,747 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:08:15,747 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:08:15,748 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:08:15,748 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:08:15,748 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a3a61c9ba14f:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:08:15,748 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,748 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:08:15,748 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,750 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733148525749 2024-12-02T14:08:15,751 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a3a61c9ba14f:39167 2024-12-02T14:08:15,751 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:08:15,752 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:08:15,754 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 
2024-12-02T14:08:15,754 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:08:15,754 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:08:15,754 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T14:08:15,754 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:08:15,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:08:15,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:08:15,756 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:08:15,756 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:08:15,756 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,45661,1733148494419 with port=39167, startcode=1733148494949 2024-12-02T14:08:15,756 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,759 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:08:15,760 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:08:15,760 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:15,761 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:08:15,761 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:08:15,762 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:08:15,763 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:08:15,767 DEBUG [RS:0;a3a61c9ba14f:39167 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:08:15,767 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148495764,5,FailOnTimeoutGroup] 2024-12-02T14:08:15,769 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148495767,5,FailOnTimeoutGroup] 2024-12-02T14:08:15,769 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,769 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:08:15,771 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,771 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-02T14:08:15,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:08:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:08:15,779 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:08:15,779 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1 2024-12-02T14:08:15,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:08:15,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:08:15,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:08:15,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:08:15,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:08:15,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:15,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:08:15,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:08:15,818 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:08:15,818 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:15,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:08:15,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:08:15,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:08:15,823 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:15,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:08:15,824 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:08:15,828 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:08:15,828 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:15,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:08:15,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:08:15,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740 2024-12-02T14:08:15,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740 2024-12-02T14:08:15,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:08:15,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:08:15,836 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60295, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:08:15,837 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
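The CompactionConfiguration lines above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) correspond to the usual compaction tuning properties; the per-column-family flush lower bound, as the FlushLargeStoresPolicy message itself notes, is read from the table descriptor rather than from site configuration. A minimal sketch, assuming the standard key names shown (CompactionTuningSketch is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // minFilesToCompact / maxFilesToCompact reported as 3 and 10 above.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // ratio 1.2 and off-peak ratio 5.0 from the same log lines.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
      }
    }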
2024-12-02T14:08:15,840 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:08:15,844 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45661 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:15,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45661 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:15,847 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:08:15,849 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795564, jitterRate=0.011612191796302795}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:08:15,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733148495802Initializing all the Stores at 1733148495805 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148495805Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148495809 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148495809Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148495809Cleaning up temporary data from old regions at 1733148495836 (+27 ms)Region opened successfully at 1733148495851 (+15 ms) 2024-12-02T14:08:15,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:08:15,851 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:08:15,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:08:15,851 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:08:15,852 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-02T14:08:15,853 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:08:15,853 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148495851Disabling compacts and flushes for region at 1733148495851Disabling writes for close at 1733148495852 (+1 ms)Writing region close event to WAL at 1733148495852Closed at 1733148495853 (+1 ms) 2024-12-02T14:08:15,856 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:08:15,856 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:08:15,862 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:08:15,863 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1 2024-12-02T14:08:15,863 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42389 2024-12-02T14:08:15,863 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:08:15,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:08:15,867 DEBUG [RS:0;a3a61c9ba14f:39167 {}] zookeeper.ZKUtil(111): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:15,867 WARN [RS:0;a3a61c9ba14f:39167 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
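The "Opened 1588230740" entry above shows the meta region carrying a SteppingSplitPolicy with a desiredMaxFileSize far below the production default (the test shrinks it). As a hedged illustration, a split policy and region maximum file size can be chosen cluster-wide or per table roughly like this; the class name, table name, and 10 GB value are examples, not the test's settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
      public static void main(String[] args) {
        // Cluster-wide default split policy and region max file size.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024); // 10 GB, illustrative

        // Or per table, through the descriptor.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example:tbl"))
            .setRegionSplitPolicyClassName("org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
            .setMaxFileSize(10L * 1024 * 1024 * 1024)
            .build();
        System.out.println(td.getRegionSplitPolicyClassName());
      }
    }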
2024-12-02T14:08:15,867 INFO [RS:0;a3a61c9ba14f:39167 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:08:15,868 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:15,869 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:08:15,870 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,39167,1733148494949] 2024-12-02T14:08:15,872 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:08:15,895 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:08:15,907 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:08:15,911 INFO [RS:0;a3a61c9ba14f:39167 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:08:15,911 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,912 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:08:15,917 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:08:15,919 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
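The MemStoreFlusher and PressureAwareCompactionThroughputController entries just below report globalMemStoreLimit=880 M and a 50.00-100.00 MB/second throughput band with a 60000 ms tuning period. Assuming the standard property names for these components (an assumption, double-check against the deployed version), the corresponding knobs look roughly like this; FlushAndThroughputSketch is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of region server heap usable by all memstores; the 880 M in the
        // log is typically this fraction applied to the test JVM's heap.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Compaction throughput band reported in the log (values in bytes/second).
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        // Tuning period, 60000 ms in the log.
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
      }
    }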
2024-12-02T14:08:15,919 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,919 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,919 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,920 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,921 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:08:15,921 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:08:15,921 DEBUG [RS:0;a3a61c9ba14f:39167 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:08:15,922 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,922 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,922 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,922 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
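The executor services listed above are each started with a fixed corePoolSize/maxPoolSize pair. Purely as a plain-Java illustration of those semantics (not HBase's own ExecutorService wrapper), a bounded pool of that shape can be built as follows; PoolShapeSketch and the task body are placeholders.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolShapeSketch {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1, mirroring e.g. RS_OPEN_REGION above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // let idle core threads exit, optional
        pool.execute(() -> System.out.println("open-region task placeholder"));
        pool.shutdown();
      }
    }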
2024-12-02T14:08:15,922 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,922 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,39167,1733148494949-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:08:15,944 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:08:15,946 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,39167,1733148494949-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,946 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,946 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.Replication(171): a3a61c9ba14f,39167,1733148494949 started 2024-12-02T14:08:15,961 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:15,962 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,39167,1733148494949, RpcServer on a3a61c9ba14f/172.17.0.2:39167, sessionid=0x1009b42a3c00001 2024-12-02T14:08:15,962 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:08:15,962 DEBUG [RS:0;a3a61c9ba14f:39167 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:15,963 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,39167,1733148494949' 2024-12-02T14:08:15,963 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:08:15,964 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:08:15,964 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:08:15,965 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:08:15,965 DEBUG [RS:0;a3a61c9ba14f:39167 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:15,965 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,39167,1733148494949' 2024-12-02T14:08:15,965 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:08:15,966 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:08:15,966 DEBUG [RS:0;a3a61c9ba14f:39167 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:08:15,966 INFO [RS:0;a3a61c9ba14f:39167 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:08:15,966 INFO [RS:0;a3a61c9ba14f:39167 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-02T14:08:16,024 WARN [a3a61c9ba14f:45661 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:08:16,076 INFO [RS:0;a3a61c9ba14f:39167 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C39167%2C1733148494949, suffix=, logDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949, archiveDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs, maxLogs=32 2024-12-02T14:08:16,079 INFO [RS:0;a3a61c9ba14f:39167 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148496079 2024-12-02T14:08:16,087 INFO [RS:0;a3a61c9ba14f:39167 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148496079 2024-12-02T14:08:16,089 DEBUG [RS:0;a3a61c9ba14f:39167 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38457:38457),(127.0.0.1/127.0.0.1:41385:41385)] 2024-12-02T14:08:16,280 DEBUG [a3a61c9ba14f:45661 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T14:08:16,293 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:16,300 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,39167,1733148494949, state=OPENING 2024-12-02T14:08:16,304 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:08:16,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:16,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:08:16,306 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:08:16,306 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:08:16,307 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:08:16,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,39167,1733148494949}] 2024-12-02T14:08:16,489 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:08:16,492 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48317, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:08:16,504 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:08:16,505 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:08:16,508 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C39167%2C1733148494949.meta, suffix=.meta, logDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949, archiveDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs, maxLogs=32 2024-12-02T14:08:16,510 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.meta.1733148496509.meta 2024-12-02T14:08:16,518 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.meta.1733148496509.meta 2024-12-02T14:08:16,521 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41385:41385),(127.0.0.1/127.0.0.1:38457:38457)] 2024-12-02T14:08:16,522 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:08:16,524 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:08:16,526 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T14:08:16,530 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T14:08:16,534 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:08:16,534 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:08:16,534 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:08:16,535 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:08:16,537 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:08:16,539 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:08:16,539 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:16,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:08:16,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:08:16,541 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:08:16,541 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:16,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:08:16,542 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:08:16,544 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:08:16,544 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:16,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:08:16,545 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:08:16,546 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:08:16,546 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:16,547 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T14:08:16,547 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:08:16,549 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740 2024-12-02T14:08:16,551 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740 2024-12-02T14:08:16,553 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:08:16,554 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:08:16,554 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:08:16,557 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:08:16,559 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742300, jitterRate=-0.05611732602119446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:08:16,559 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:08:16,560 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733148496535Writing region info on filesystem at 1733148496535Initializing all the Stores at 1733148496537 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148496537Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148496537Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148496537Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148496537Cleaning up temporary data from old regions at 1733148496554 (+17 ms)Running coprocessor post-open hooks at 1733148496559 (+5 ms)Region opened successfully at 1733148496560 (+1 ms) 2024-12-02T14:08:16,565 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733148496480 2024-12-02T14:08:16,576 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:08:16,576 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:08:16,578 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:16,579 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,39167,1733148494949, state=OPEN 2024-12-02T14:08:16,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:08:16,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:08:16,582 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:08:16,582 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:08:16,582 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:16,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:08:16,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,39167,1733148494949 in 275 msec 2024-12-02T14:08:16,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T14:08:16,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 729 msec 2024-12-02T14:08:16,596 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:08:16,597 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:08:16,615 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:08:16,616 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,39167,1733148494949, seqNum=-1] 2024-12-02T14:08:16,633 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:08:16,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58169, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:08:16,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 965 msec 2024-12-02T14:08:16,654 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733148496653, completionTime=-1 2024-12-02T14:08:16,656 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T14:08:16,656 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:08:16,676 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T14:08:16,677 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733148556676 2024-12-02T14:08:16,677 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733148616677 2024-12-02T14:08:16,677 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 21 msec 2024-12-02T14:08:16,679 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,45661,1733148494419-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:16,679 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,45661,1733148494419-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:16,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,45661,1733148494419-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:16,681 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a3a61c9ba14f:45661, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:08:16,681 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:16,681 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T14:08:16,687 DEBUG [master/a3a61c9ba14f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T14:08:16,703 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.625sec 2024-12-02T14:08:16,704 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T14:08:16,705 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T14:08:16,706 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T14:08:16,707 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T14:08:16,707 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T14:08:16,708 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,45661,1733148494419-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:08:16,708 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,45661,1733148494419-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T14:08:16,716 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T14:08:16,717 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T14:08:16,717 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,45661,1733148494419-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:08:16,737 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b598c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:08:16,739 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T14:08:16,739 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T14:08:16,742 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a3a61c9ba14f,45661,-1 for getting cluster id 2024-12-02T14:08:16,744 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:08:16,751 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a5d27c7c-38eb-49ed-8581-4293b4d513f3' 2024-12-02T14:08:16,754 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:08:16,754 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a5d27c7c-38eb-49ed-8581-4293b4d513f3" 2024-12-02T14:08:16,756 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32768bc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:08:16,756 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a3a61c9ba14f,45661,-1] 2024-12-02T14:08:16,759 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:08:16,760 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:08:16,761 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43900, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:08:16,764 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e15a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:08:16,764 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:08:16,771 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,39167,1733148494949, seqNum=-1] 2024-12-02T14:08:16,771 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:08:16,773 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50920, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:08:16,815 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=a3a61c9ba14f,45661,1733148494419 2024-12-02T14:08:16,816 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:08:16,827 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T14:08:16,832 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T14:08:16,837 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is a3a61c9ba14f,45661,1733148494419 2024-12-02T14:08:16,839 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@50099097 2024-12-02T14:08:16,840 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T14:08:16,843 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43908, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T14:08:16,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45661 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T14:08:16,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45661 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-02T14:08:16,848 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45661 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:08:16,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45661 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-02T14:08:16,857 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T14:08:16,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45661 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-02T14:08:16,859 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:16,861 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T14:08:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:08:16,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741835_1011 (size=389) 2024-12-02T14:08:16,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741835_1011 (size=389) 2024-12-02T14:08:16,916 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ddd527f5248d4da9c8e9f317318eaae2, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1 2024-12-02T14:08:16,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741836_1012 (size=72) 2024-12-02T14:08:16,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741836_1012 (size=72) 2024-12-02T14:08:16,926 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:08:16,926 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ddd527f5248d4da9c8e9f317318eaae2, disabling compactions & flushes 2024-12-02T14:08:16,926 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:08:16,926 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:08:16,926 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. after waiting 0 ms 2024-12-02T14:08:16,926 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:08:16,926 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:08:16,926 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ddd527f5248d4da9c8e9f317318eaae2: Waiting for close lock at 1733148496926Disabling compacts and flushes for region at 1733148496926Disabling writes for close at 1733148496926Writing region close event to WAL at 1733148496926Closed at 1733148496926 2024-12-02T14:08:16,928 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T14:08:16,932 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733148496928"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733148496928"}]},"ts":"1733148496928"} 2024-12-02T14:08:16,937 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T14:08:16,939 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T14:08:16,941 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148496939"}]},"ts":"1733148496939"} 2024-12-02T14:08:16,949 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-02T14:08:16,951 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ddd527f5248d4da9c8e9f317318eaae2, ASSIGN}] 2024-12-02T14:08:16,953 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ddd527f5248d4da9c8e9f317318eaae2, ASSIGN 2024-12-02T14:08:16,955 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ddd527f5248d4da9c8e9f317318eaae2, ASSIGN; state=OFFLINE, location=a3a61c9ba14f,39167,1733148494949; forceNewPlan=false, retain=false 2024-12-02T14:08:17,108 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ddd527f5248d4da9c8e9f317318eaae2, regionState=OPENING, regionLocation=a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:17,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ddd527f5248d4da9c8e9f317318eaae2, ASSIGN because future has completed 2024-12-02T14:08:17,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ddd527f5248d4da9c8e9f317318eaae2, server=a3a61c9ba14f,39167,1733148494949}] 2024-12-02T14:08:17,282 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 
2024-12-02T14:08:17,282 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ddd527f5248d4da9c8e9f317318eaae2, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:08:17,283 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,283 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:08:17,283 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,283 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,286 INFO [StoreOpener-ddd527f5248d4da9c8e9f317318eaae2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,288 INFO [StoreOpener-ddd527f5248d4da9c8e9f317318eaae2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ddd527f5248d4da9c8e9f317318eaae2 columnFamilyName info 2024-12-02T14:08:17,288 DEBUG [StoreOpener-ddd527f5248d4da9c8e9f317318eaae2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:08:17,290 INFO [StoreOpener-ddd527f5248d4da9c8e9f317318eaae2-1 {}] regionserver.HStore(327): Store=ddd527f5248d4da9c8e9f317318eaae2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:08:17,290 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,291 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,292 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,292 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,292 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,295 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,298 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:08:17,299 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ddd527f5248d4da9c8e9f317318eaae2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=854088, jitterRate=0.086029052734375}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:08:17,299 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:17,300 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ddd527f5248d4da9c8e9f317318eaae2: Running coprocessor pre-open hook at 1733148497283Writing region info on filesystem at 1733148497283Initializing all the Stores at 1733148497285 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148497285Cleaning up temporary data from old regions at 1733148497293 (+8 ms)Running coprocessor post-open hooks at 1733148497299 (+6 ms)Region opened successfully at 1733148497300 (+1 ms) 2024-12-02T14:08:17,303 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2., pid=6, masterSystemTime=1733148497274 2024-12-02T14:08:17,306 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:08:17,306 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:08:17,307 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ddd527f5248d4da9c8e9f317318eaae2, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,39167,1733148494949 2024-12-02T14:08:17,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ddd527f5248d4da9c8e9f317318eaae2, server=a3a61c9ba14f,39167,1733148494949 because future has completed 2024-12-02T14:08:17,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T14:08:17,316 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ddd527f5248d4da9c8e9f317318eaae2, server=a3a61c9ba14f,39167,1733148494949 in 193 msec 2024-12-02T14:08:17,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T14:08:17,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ddd527f5248d4da9c8e9f317318eaae2, ASSIGN in 365 msec 2024-12-02T14:08:17,322 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T14:08:17,322 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148497322"}]},"ts":"1733148497322"} 2024-12-02T14:08:17,326 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-02T14:08:17,328 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T14:08:17,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 478 msec 2024-12-02T14:08:22,038 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-02T14:08:22,084 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T14:08:22,085 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-02T14:08:24,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T14:08:24,725 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T14:08:24,728 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-02T14:08:24,728 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T14:08:24,729 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:08:24,730 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T14:08:24,730 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T14:08:24,730 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T14:08:26,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45661 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:08:26,961 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-02T14:08:26,969 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-02T14:08:26,979 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-02T14:08:26,979 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 
2024-12-02T14:08:26,980 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148506980 2024-12-02T14:08:26,989 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:26,989 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:26,990 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:26,990 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:26,990 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:26,990 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148496079 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148506980 2024-12-02T14:08:26,992 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41385:41385),(127.0.0.1/127.0.0.1:38457:38457)] 2024-12-02T14:08:26,992 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148496079 is not closed yet, will try archiving it next time 2024-12-02T14:08:26,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741833_1009 (size=451) 2024-12-02T14:08:26,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741833_1009 (size=451) 2024-12-02T14:08:26,995 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148496079 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs/a3a61c9ba14f%2C39167%2C1733148494949.1733148496079 2024-12-02T14:08:27,001 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2., hostname=a3a61c9ba14f,39167,1733148494949, seqNum=2] 2024-12-02T14:08:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39167 {}] regionserver.HRegion(8855): Flush requested on ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:39,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ddd527f5248d4da9c8e9f317318eaae2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T14:08:39,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/db8facbd0b53409881c42406d3a43f75 is 1080, key is row0001/info:/1733148507004/Put/seqid=0 2024-12-02T14:08:39,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741838_1014 (size=12509) 2024-12-02T14:08:39,121 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741838_1014 (size=12509) 2024-12-02T14:08:39,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/db8facbd0b53409881c42406d3a43f75 2024-12-02T14:08:39,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/db8facbd0b53409881c42406d3a43f75 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75 2024-12-02T14:08:39,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75, entries=7, sequenceid=11, filesize=12.2 K 2024-12-02T14:08:39,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ddd527f5248d4da9c8e9f317318eaae2 in 126ms, sequenceid=11, compaction requested=false 2024-12-02T14:08:39,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ddd527f5248d4da9c8e9f317318eaae2: 2024-12-02T14:08:43,580 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
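The MemStoreFlusher entries above trace one complete flush: a flush is requested on ddd527f5248d4da9c8e9f317318eaae2, about 7.36 KB of memstore is written to a temporary HFile under .tmp, and the file is committed into info/ at sequenceid=11. A flush of the same table can also be requested explicitly through the Admin API; a minimal sketch, under the same connection assumptions as the previous example:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForceFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the region server to flush the table's memstores; the server-side effect is the
      // MemStoreFlusher/DefaultStoreFlusher sequence logged above.
      admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
    }
  }
}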
2024-12-02T14:08:47,079 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148527078 2024-12-02T14:08:47,294 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:47,295 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:47,295 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:47,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:47,295 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:47,295 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:47,296 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148506980 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148527078 2024-12-02T14:08:47,297 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41385:41385),(127.0.0.1/127.0.0.1:38457:38457)] 2024-12-02T14:08:47,297 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148506980 is not closed yet, will try archiving it next time 2024-12-02T14:08:47,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741837_1013 (size=12399) 2024-12-02T14:08:47,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741837_1013 (size=12399) 2024-12-02T14:08:47,502 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:49,708 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:51,915 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:54,121 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:54,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39167 {}] regionserver.HRegion(8855): Flush requested on ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:08:54,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ddd527f5248d4da9c8e9f317318eaae2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T14:08:54,325 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:54,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/82b5d3d4cbdc4569b55c1afb82f07bd3 is 1080, key is row0008/info:/1733148521058/Put/seqid=0 2024-12-02T14:08:54,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741840_1016 (size=12509) 2024-12-02T14:08:54,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741840_1016 (size=12509) 2024-12-02T14:08:54,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/82b5d3d4cbdc4569b55c1afb82f07bd3 2024-12-02T14:08:54,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/82b5d3d4cbdc4569b55c1afb82f07bd3 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/82b5d3d4cbdc4569b55c1afb82f07bd3 2024-12-02T14:08:54,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/82b5d3d4cbdc4569b55c1afb82f07bd3, entries=7, sequenceid=21, filesize=12.2 K 2024-12-02T14:08:54,566 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:54,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ddd527f5248d4da9c8e9f317318eaae2 in 
444ms, sequenceid=21, compaction requested=false 2024-12-02T14:08:54,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ddd527f5248d4da9c8e9f317318eaae2: 2024-12-02T14:08:54,567 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-02T14:08:54,567 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:08:54,569 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75 because midkey is the same as first or last row 2024-12-02T14:08:56,327 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:56,718 INFO [master/a3a61c9ba14f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T14:08:56,718 INFO [master/a3a61c9ba14f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T14:08:58,533 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:58,535 WARN [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:58,536 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C39167%2C1733148494949:(num 1733148527078) roll requested 2024-12-02T14:08:58,537 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148538537 2024-12-02T14:08:58,750 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:08:58,750 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:58,751 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:58,751 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:58,751 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:08:58,751 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
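The WARN entry at 14:08:58 explains the roll that follows: eight syncs exceeded the slow-sync cost threshold against a limit of five, so AbstractFSWAL asks the log roller for a new WAL file. The sketch below only mirrors that count-versus-threshold decision; it is illustrative, with hypothetical names and values, and is not HBase's internal implementation.

// Illustrative only: counts syncs slower than a cutoff and signals when a roll should be
// requested, mirroring "count=8, threshold=5" in the WARN entry above.
final class SlowSyncRollTracker {
  private final long slowSyncCutoffMs; // a sync slower than this counts as "slow" (hypothetical value)
  private final int rollThreshold;     // how many slow syncs before a roll is requested
  private int slowSyncCount;

  SlowSyncRollTracker(long slowSyncCutoffMs, int rollThreshold) {
    this.slowSyncCutoffMs = slowSyncCutoffMs;
    this.rollThreshold = rollThreshold;
  }

  /** Record one completed sync; returns true when the caller should request a log roll. */
  synchronized boolean onSyncCompleted(long syncCostMs) {
    if (syncCostMs > slowSyncCutoffMs) {
      slowSyncCount++;
    }
    if (slowSyncCount > rollThreshold) {
      slowSyncCount = 0; // reset once a roll has been requested
      return true;
    }
    return false;
  }
}

With, say, a 100 ms cutoff and the threshold of 5 seen in the log, the repeated ~200 ms syncs reported above would trip such a tracker on the sixth slow sync, which is the behavior this test exercises.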
2024-12-02T14:08:58,752 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148527078 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148538537 2024-12-02T14:08:58,753 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38457:38457),(127.0.0.1/127.0.0.1:41385:41385)] 2024-12-02T14:08:58,753 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148527078 is not closed yet, will try archiving it next time 2024-12-02T14:08:58,753 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148506980 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs/a3a61c9ba14f%2C39167%2C1733148494949.1733148506980 2024-12-02T14:08:58,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741839_1015 (size=7739) 2024-12-02T14:08:58,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741839_1015 (size=7739) 2024-12-02T14:09:00,741 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK]] 2024-12-02T14:09:02,283 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ddd527f5248d4da9c8e9f317318eaae2, had cached 0 bytes from a total of 25018 2024-12-02T14:09:02,947 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK]] 2024-12-02T14:09:05,154 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK]] 2024-12-02T14:09:07,363 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK], 
DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK]] 2024-12-02T14:09:09,367 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T14:09:09,367 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148549367 2024-12-02T14:09:13,581 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T14:09:14,380 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK]] 2024-12-02T14:09:14,383 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK], DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK]] 2024-12-02T14:09:14,383 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C39167%2C1733148494949:(num 1733148549367) roll requested 2024-12-02T14:09:14,383 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:14,384 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:14,384 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:14,384 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:14,384 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:14,384 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148538537 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148549367 2024-12-02T14:09:14,385 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41385:41385),(127.0.0.1/127.0.0.1:38457:38457)] 2024-12-02T14:09:14,385 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148538537 is not closed yet, will try archiving it next time 2024-12-02T14:09:14,385 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 2024-12-02T14:09:14,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741841_1017 (size=4753) 2024-12-02T14:09:14,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741841_1017 (size=4753) 2024-12-02T14:09:19,390 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:19,390 WARN [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:19,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39167 {}] regionserver.HRegion(8855): Flush requested on ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:09:19,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ddd527f5248d4da9c8e9f317318eaae2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T14:09:19,398 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:19,398 WARN [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:21,393 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T14:09:24,396 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:24,397 WARN [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:24,397 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:24,398 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:24,398 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:24,399 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:24,399 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:24,399 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148549367 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 2024-12-02T14:09:24,401 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41385:41385),(127.0.0.1/127.0.0.1:38457:38457)] 2024-12-02T14:09:24,401 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148549367 is not closed yet, will try archiving it next time 2024-12-02T14:09:24,401 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C39167%2C1733148494949:(num 1733148554385) roll requested 2024-12-02T14:09:24,402 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148564402 2024-12-02T14:09:24,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741842_1018 (size=1569) 2024-12-02T14:09:24,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741842_1018 (size=1569) 2024-12-02T14:09:24,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/6e619e0088a745fb8d6c3ed576ecb08e is 1080, key is row0015/info:/1733148536123/Put/seqid=0 2024-12-02T14:09:24,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741844_1020 (size=12509) 2024-12-02T14:09:24,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741844_1020 (size=12509) 2024-12-02T14:09:24,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/6e619e0088a745fb8d6c3ed576ecb08e 2024-12-02T14:09:24,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/6e619e0088a745fb8d6c3ed576ecb08e as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/6e619e0088a745fb8d6c3ed576ecb08e 2024-12-02T14:09:24,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/6e619e0088a745fb8d6c3ed576ecb08e, entries=7, sequenceid=31, filesize=12.2 K 2024-12-02T14:09:29,419 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:29,419 WARN [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:29,436 INFO [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:29,437 WARN [FSHLog-0-hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1-prefix:a3a61c9ba14f,39167,1733148494949 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37629,DS-bbcc67f8-9d66-46b2-bc7c-f3a841391132,DISK], DatanodeInfoWithStorage[127.0.0.1:45893,DS-f312db39-1fda-474d-bb02-ca866b5f1911,DISK]] 2024-12-02T14:09:29,437 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ddd527f5248d4da9c8e9f317318eaae2 in 10045ms, sequenceid=31, compaction requested=true 2024-12-02T14:09:29,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ddd527f5248d4da9c8e9f317318eaae2: 2024-12-02T14:09:29,437 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,438 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,438 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-02T14:09:29,438 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:09:29,438 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,438 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75 because midkey is the same as first or last row 2024-12-02T14:09:29,438 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,439 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148564402 2024-12-02T14:09:29,441 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:41385:41385),(127.0.0.1/127.0.0.1:38457:38457)] 2024-12-02T14:09:29,441 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 is not closed yet, will try archiving it next time 2024-12-02T14:09:29,441 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148527078 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs/a3a61c9ba14f%2C39167%2C1733148494949.1733148527078 2024-12-02T14:09:29,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ddd527f5248d4da9c8e9f317318eaae2:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:09:29,442 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C39167%2C1733148494949:(num 1733148564402) roll requested 2024-12-02T14:09:29,442 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148569442 2024-12-02T14:09:29,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741843_1019 (size=438) 2024-12-02T14:09:29,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741843_1019 (size=438) 2024-12-02T14:09:29,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:09:29,446 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148538537 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs/a3a61c9ba14f%2C39167%2C1733148494949.1733148538537 2024-12-02T14:09:29,446 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:09:29,448 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148549367 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs/a3a61c9ba14f%2C39167%2C1733148494949.1733148549367 2024-12-02T14:09:29,449 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:09:29,450 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.HStore(1541): ddd527f5248d4da9c8e9f317318eaae2/info is initiating minor compaction (all files) 2024-12-02T14:09:29,450 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,451 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,451 INFO 
[sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,451 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,451 INFO [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ddd527f5248d4da9c8e9f317318eaae2/info in TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:09:29,451 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,451 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148564402 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148569442 2024-12-02T14:09:29,451 INFO [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75, hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/82b5d3d4cbdc4569b55c1afb82f07bd3, hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/6e619e0088a745fb8d6c3ed576ecb08e] into tmpdir=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp, totalSize=36.6 K 2024-12-02T14:09:29,453 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] compactions.Compactor(225): Compacting db8facbd0b53409881c42406d3a43f75, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733148507004 2024-12-02T14:09:29,454 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82b5d3d4cbdc4569b55c1afb82f07bd3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733148521058 2024-12-02T14:09:29,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741845_1021 (size=93) 2024-12-02T14:09:29,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741845_1021 (size=93) 2024-12-02T14:09:29,455 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6e619e0088a745fb8d6c3ed576ecb08e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733148536123 2024-12-02T14:09:29,455 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 is not closed yet, will try archiving it next time 2024-12-02T14:09:29,455 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148564402 to 
hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs/a3a61c9ba14f%2C39167%2C1733148494949.1733148564402 2024-12-02T14:09:29,463 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41385:41385),(127.0.0.1/127.0.0.1:38457:38457)] 2024-12-02T14:09:29,463 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 is not closed yet, will try archiving it next time 2024-12-02T14:09:29,464 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C39167%2C1733148494949.1733148569464 2024-12-02T14:09:29,478 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,478 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,479 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,479 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,479 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:29,479 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148569442 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148569464 2024-12-02T14:09:29,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741846_1022 (size=1258) 2024-12-02T14:09:29,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741846_1022 (size=1258) 2024-12-02T14:09:29,484 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38457:38457),(127.0.0.1/127.0.0.1:41385:41385)] 2024-12-02T14:09:29,484 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 is not closed yet, will try archiving it next time 2024-12-02T14:09:29,484 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148569442 is not closed yet, will try archiving it next time 2024-12-02T14:09:29,485 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 is not closed yet, will try archiving it next time 2024-12-02T14:09:29,489 INFO [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ddd527f5248d4da9c8e9f317318eaae2#info#compaction#3 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:09:29,490 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/3d6e2cbc888f466e9734662dfca5d597 is 1080, key is row0001/info:/1733148507004/Put/seqid=0 2024-12-02T14:09:29,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741848_1024 (size=27710) 2024-12-02T14:09:29,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741848_1024 (size=27710) 2024-12-02T14:09:29,511 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/3d6e2cbc888f466e9734662dfca5d597 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/3d6e2cbc888f466e9734662dfca5d597 2024-12-02T14:09:29,528 INFO [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ddd527f5248d4da9c8e9f317318eaae2/info of ddd527f5248d4da9c8e9f317318eaae2 into 3d6e2cbc888f466e9734662dfca5d597(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:09:29,528 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ddd527f5248d4da9c8e9f317318eaae2: 2024-12-02T14:09:29,531 INFO [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2., storeName=ddd527f5248d4da9c8e9f317318eaae2/info, priority=13, startTime=1733148569441; duration=0sec 2024-12-02T14:09:29,531 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-02T14:09:29,531 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:09:29,532 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/3d6e2cbc888f466e9734662dfca5d597 because midkey is the same as first or last row 2024-12-02T14:09:29,532 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-02T14:09:29,532 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:09:29,532 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/3d6e2cbc888f466e9734662dfca5d597 because midkey is the same as first or last row 2024-12-02T14:09:29,532 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-02T14:09:29,533 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:09:29,533 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/3d6e2cbc888f466e9734662dfca5d597 because midkey is the same as first or last row 2024-12-02T14:09:29,533 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:09:29,533 DEBUG [RS:0;a3a61c9ba14f:39167-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ddd527f5248d4da9c8e9f317318eaae2:info 2024-12-02T14:09:29,848 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/WALs/a3a61c9ba14f,39167,1733148494949/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs/a3a61c9ba14f%2C39167%2C1733148494949.1733148554385 2024-12-02T14:09:41,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39167 {}] regionserver.HRegion(8855): Flush requested on ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:09:41,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ddd527f5248d4da9c8e9f317318eaae2 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T14:09:41,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/c4002208c3f14d79b710d2a971a994d0 is 1080, key is row0022/info:/1733148569465/Put/seqid=0 2024-12-02T14:09:41,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741849_1025 (size=12509) 2024-12-02T14:09:41,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741849_1025 (size=12509) 2024-12-02T14:09:41,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/c4002208c3f14d79b710d2a971a994d0 2024-12-02T14:09:41,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/c4002208c3f14d79b710d2a971a994d0 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/c4002208c3f14d79b710d2a971a994d0 2024-12-02T14:09:41,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/c4002208c3f14d79b710d2a971a994d0, entries=7, sequenceid=42, filesize=12.2 K 2024-12-02T14:09:41,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ddd527f5248d4da9c8e9f317318eaae2 in 42ms, sequenceid=42, compaction requested=false 2024-12-02T14:09:41,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ddd527f5248d4da9c8e9f317318eaae2: 2024-12-02T14:09:41,553 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-02T14:09:41,553 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:09:41,553 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/3d6e2cbc888f466e9734662dfca5d597 because midkey is the same as first or last row 2024-12-02T14:09:43,581 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T14:09:47,284 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ddd527f5248d4da9c8e9f317318eaae2, had cached 0 bytes from a total of 40219 2024-12-02T14:09:49,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T14:09:49,534 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
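At 14:09:49 the test enters teardown: HBaseTestingUtil(1019) shuts down the minicluster, which first closes the shared async connection (the call stack that follows is logged deliberately by AsyncConnectionImpl at DEBUG). A minimal sketch of that lifecycle in a JUnit 4 test, assuming the same HBaseTestingUtil class referenced in the stack traces and its usual no-argument startMiniCluster():

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycle {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Brings up the in-process HDFS, ZooKeeper and HBase daemons used throughout this log.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" sequence above: close the connection,
    // stop the HBase cluster, then the underlying HDFS/ZooKeeper miniclusters.
    TEST_UTIL.shutdownMiniCluster();
  }
}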
2024-12-02T14:09:49,534 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:09:49,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:49,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:49,542 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T14:09:49,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T14:09:49,542 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1904156029, stopped=false 2024-12-02T14:09:49,542 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a3a61c9ba14f,45661,1733148494419 2024-12-02T14:09:49,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:49,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:49,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:49,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:49,544 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:09:49,544 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T14:09:49,544 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:49,544 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:09:49,544 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:49,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:49,545 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,39167,1733148494949' ***** 2024-12-02T14:09:49,545 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T14:09:49,545 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:09:49,545 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:09:49,545 INFO [RS:0;a3a61c9ba14f:39167 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T14:09:49,546 INFO [RS:0;a3a61c9ba14f:39167 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T14:09:49,546 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(3091): Received CLOSE for ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:09:49,546 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,39167,1733148494949 2024-12-02T14:09:49,546 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:09:49,546 INFO [RS:0;a3a61c9ba14f:39167 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a3a61c9ba14f:39167. 
2024-12-02T14:09:49,547 DEBUG [RS:0;a3a61c9ba14f:39167 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:09:49,547 DEBUG [RS:0;a3a61c9ba14f:39167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:49,547 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ddd527f5248d4da9c8e9f317318eaae2, disabling compactions & flushes 2024-12-02T14:09:49,547 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:09:49,547 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:09:49,547 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:09:49,547 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:09:49,547 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T14:09:49,547 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. after waiting 0 ms 2024-12-02T14:09:49,547 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 
2024-12-02T14:09:49,547 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:09:49,547 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ddd527f5248d4da9c8e9f317318eaae2 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-02T14:09:49,547 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T14:09:49,548 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1325): Online Regions={ddd527f5248d4da9c8e9f317318eaae2=TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T14:09:49,548 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:09:49,548 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:09:49,548 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:09:49,548 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:09:49,548 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:09:49,548 DEBUG [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ddd527f5248d4da9c8e9f317318eaae2 2024-12-02T14:09:49,548 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-02T14:09:49,553 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/3b2254a1bdd34e549d0ba508577111b3 is 1080, key is row0029/info:/1733148583515/Put/seqid=0 2024-12-02T14:09:49,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741850_1026 (size=8193) 2024-12-02T14:09:49,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741850_1026 (size=8193) 2024-12-02T14:09:49,561 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/3b2254a1bdd34e549d0ba508577111b3 2024-12-02T14:09:49,570 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/info/f668e20a81e246f4bb2096e02f2ddd5c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2./info:regioninfo/1733148497307/Put/seqid=0 2024-12-02T14:09:49,572 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/.tmp/info/3b2254a1bdd34e549d0ba508577111b3 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/3b2254a1bdd34e549d0ba508577111b3 2024-12-02T14:09:49,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741851_1027 (size=7016) 2024-12-02T14:09:49,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741851_1027 (size=7016) 2024-12-02T14:09:49,577 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/info/f668e20a81e246f4bb2096e02f2ddd5c 2024-12-02T14:09:49,582 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/3b2254a1bdd34e549d0ba508577111b3, entries=3, sequenceid=48, filesize=8.0 K 2024-12-02T14:09:49,584 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ddd527f5248d4da9c8e9f317318eaae2 in 37ms, sequenceid=48, compaction requested=true 2024-12-02T14:09:49,585 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75, hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/82b5d3d4cbdc4569b55c1afb82f07bd3, hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/6e619e0088a745fb8d6c3ed576ecb08e] to archive 2024-12-02T14:09:49,588 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T14:09:49,591 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/db8facbd0b53409881c42406d3a43f75 2024-12-02T14:09:49,593 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/82b5d3d4cbdc4569b55c1afb82f07bd3 to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/82b5d3d4cbdc4569b55c1afb82f07bd3 2024-12-02T14:09:49,595 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/6e619e0088a745fb8d6c3ed576ecb08e to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/info/6e619e0088a745fb8d6c3ed576ecb08e 2024-12-02T14:09:49,602 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/ns/43417060636c4111825e6ff86514411f is 43, key is default/ns:d/1733148496639/Put/seqid=0 2024-12-02T14:09:49,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741852_1028 (size=5153) 2024-12-02T14:09:49,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741852_1028 (size=5153) 2024-12-02T14:09:49,610 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/ns/43417060636c4111825e6ff86514411f 2024-12-02T14:09:49,606 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a3a61c9ba14f:45661 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-02T14:09:49,611 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [db8facbd0b53409881c42406d3a43f75=12509, 82b5d3d4cbdc4569b55c1afb82f07bd3=12509, 6e619e0088a745fb8d6c3ed576ecb08e=12509] 2024-12-02T14:09:49,616 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/default/TestLogRolling-testSlowSyncLogRolling/ddd527f5248d4da9c8e9f317318eaae2/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-02T14:09:49,619 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 2024-12-02T14:09:49,619 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ddd527f5248d4da9c8e9f317318eaae2: Waiting for close lock at 1733148589546Running coprocessor pre-close hooks at 1733148589547 (+1 ms)Disabling compacts and flushes for region at 1733148589547Disabling writes for close at 1733148589547Obtaining lock to block concurrent updates at 1733148589547Preparing flush snapshotting stores in ddd527f5248d4da9c8e9f317318eaae2 at 1733148589547Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733148589548 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. at 1733148589549 (+1 ms)Flushing ddd527f5248d4da9c8e9f317318eaae2/info: creating writer at 1733148589549Flushing ddd527f5248d4da9c8e9f317318eaae2/info: appending metadata at 1733148589553 (+4 ms)Flushing ddd527f5248d4da9c8e9f317318eaae2/info: closing flushed file at 1733148589553Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18696afe: reopening flushed file at 1733148589571 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ddd527f5248d4da9c8e9f317318eaae2 in 37ms, sequenceid=48, compaction requested=true at 1733148589584 (+13 ms)Writing region close event to WAL at 1733148589612 (+28 ms)Running coprocessor post-close hooks at 1733148589617 (+5 ms)Closed at 1733148589619 (+2 ms) 2024-12-02T14:09:49,619 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733148496844.ddd527f5248d4da9c8e9f317318eaae2. 
2024-12-02T14:09:49,632 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/table/8b4f8aeb11ad4201a94dcb612c484176 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733148497322/Put/seqid=0 2024-12-02T14:09:49,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741853_1029 (size=5396) 2024-12-02T14:09:49,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741853_1029 (size=5396) 2024-12-02T14:09:49,638 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/table/8b4f8aeb11ad4201a94dcb612c484176 2024-12-02T14:09:49,646 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/info/f668e20a81e246f4bb2096e02f2ddd5c as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/info/f668e20a81e246f4bb2096e02f2ddd5c 2024-12-02T14:09:49,654 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/info/f668e20a81e246f4bb2096e02f2ddd5c, entries=10, sequenceid=11, filesize=6.9 K 2024-12-02T14:09:49,655 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/ns/43417060636c4111825e6ff86514411f as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/ns/43417060636c4111825e6ff86514411f 2024-12-02T14:09:49,663 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/ns/43417060636c4111825e6ff86514411f, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T14:09:49,665 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/.tmp/table/8b4f8aeb11ad4201a94dcb612c484176 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/table/8b4f8aeb11ad4201a94dcb612c484176 2024-12-02T14:09:49,672 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/table/8b4f8aeb11ad4201a94dcb612c484176, entries=2, sequenceid=11, filesize=5.3 K 2024-12-02T14:09:49,673 INFO 
[RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false 2024-12-02T14:09:49,679 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T14:09:49,680 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:09:49,680 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:09:49,680 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148589547Running coprocessor pre-close hooks at 1733148589547Disabling compacts and flushes for region at 1733148589547Disabling writes for close at 1733148589548 (+1 ms)Obtaining lock to block concurrent updates at 1733148589548Preparing flush snapshotting stores in 1588230740 at 1733148589548Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733148589548Flushing stores of hbase:meta,,1.1588230740 at 1733148589549 (+1 ms)Flushing 1588230740/info: creating writer at 1733148589549Flushing 1588230740/info: appending metadata at 1733148589569 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733148589569Flushing 1588230740/ns: creating writer at 1733148589586 (+17 ms)Flushing 1588230740/ns: appending metadata at 1733148589602 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733148589602Flushing 1588230740/table: creating writer at 1733148589618 (+16 ms)Flushing 1588230740/table: appending metadata at 1733148589631 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733148589631Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@361a6d7f: reopening flushed file at 1733148589645 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@299c2002: reopening flushed file at 1733148589654 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@753e67be: reopening flushed file at 1733148589664 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 125ms, sequenceid=11, compaction requested=false at 1733148589673 (+9 ms)Writing region close event to WAL at 1733148589675 (+2 ms)Running coprocessor post-close hooks at 1733148589679 (+4 ms)Closed at 1733148589680 (+1 ms) 2024-12-02T14:09:49,680 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:09:49,748 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,39167,1733148494949; all regions closed. 
2024-12-02T14:09:49,751 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,752 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,752 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,752 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,753 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741834_1010 (size=3066) 2024-12-02T14:09:49,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741834_1010 (size=3066) 2024-12-02T14:09:49,761 DEBUG [RS:0;a3a61c9ba14f:39167 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs 2024-12-02T14:09:49,761 INFO [RS:0;a3a61c9ba14f:39167 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C39167%2C1733148494949.meta:.meta(num 1733148496509) 2024-12-02T14:09:49,762 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,762 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,762 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,762 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741847_1023 (size=12695) 2024-12-02T14:09:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741847_1023 (size=12695) 2024-12-02T14:09:49,768 DEBUG [RS:0;a3a61c9ba14f:39167 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/oldWALs 2024-12-02T14:09:49,768 INFO [RS:0;a3a61c9ba14f:39167 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C39167%2C1733148494949:(num 1733148569464) 2024-12-02T14:09:49,768 DEBUG [RS:0;a3a61c9ba14f:39167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:49,768 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:09:49,768 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:09:49,769 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:09:49,769 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:09:49,769 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T14:09:49,769 INFO [RS:0;a3a61c9ba14f:39167 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39167 2024-12-02T14:09:49,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,39167,1733148494949 2024-12-02T14:09:49,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:09:49,772 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:09:49,773 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,39167,1733148494949] 2024-12-02T14:09:49,774 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,39167,1733148494949 already deleted, retry=false 2024-12-02T14:09:49,774 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,39167,1733148494949 expired; onlineServers=0 2024-12-02T14:09:49,774 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a3a61c9ba14f,45661,1733148494419' ***** 2024-12-02T14:09:49,774 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T14:09:49,775 INFO [M:0;a3a61c9ba14f:45661 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:09:49,775 INFO [M:0;a3a61c9ba14f:45661 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:09:49,775 DEBUG [M:0;a3a61c9ba14f:45661 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T14:09:49,775 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T14:09:49,775 DEBUG [M:0;a3a61c9ba14f:45661 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:09:49,775 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148495767 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148495767,5,FailOnTimeoutGroup] 2024-12-02T14:09:49,775 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148495764 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148495764,5,FailOnTimeoutGroup] 2024-12-02T14:09:49,775 INFO [M:0;a3a61c9ba14f:45661 {}] hbase.ChoreService(370): Chore service for: master/a3a61c9ba14f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:09:49,775 INFO [M:0;a3a61c9ba14f:45661 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:09:49,775 DEBUG [M:0;a3a61c9ba14f:45661 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:09:49,775 INFO [M:0;a3a61c9ba14f:45661 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:09:49,775 INFO [M:0;a3a61c9ba14f:45661 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:09:49,776 INFO [M:0;a3a61c9ba14f:45661 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:09:49,776 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T14:09:49,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:09:49,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:49,776 DEBUG [M:0;a3a61c9ba14f:45661 {}] zookeeper.ZKUtil(347): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:09:49,777 WARN [M:0;a3a61c9ba14f:45661 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:09:49,777 INFO [M:0;a3a61c9ba14f:45661 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/.lastflushedseqids 2024-12-02T14:09:49,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741854_1030 (size=130) 2024-12-02T14:09:49,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741854_1030 (size=130) 2024-12-02T14:09:49,789 INFO [M:0;a3a61c9ba14f:45661 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:09:49,789 INFO [M:0;a3a61c9ba14f:45661 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:09:49,789 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:09:49,789 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:49,789 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:49,789 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:09:49,789 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:49,790 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-12-02T14:09:49,807 DEBUG [M:0;a3a61c9ba14f:45661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ee006499e374a7280ab16ea506875ef is 82, key is hbase:meta,,1/info:regioninfo/1733148496577/Put/seqid=0 2024-12-02T14:09:49,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741855_1031 (size=5672) 2024-12-02T14:09:49,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741855_1031 (size=5672) 2024-12-02T14:09:49,814 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ee006499e374a7280ab16ea506875ef 2024-12-02T14:09:49,837 DEBUG [M:0;a3a61c9ba14f:45661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c4334b715ba4e58a183ad4e6e88bb29 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733148497330/Put/seqid=0 2024-12-02T14:09:49,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741856_1032 (size=6247) 2024-12-02T14:09:49,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741856_1032 (size=6247) 2024-12-02T14:09:49,843 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c4334b715ba4e58a183ad4e6e88bb29 2024-12-02T14:09:49,849 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1c4334b715ba4e58a183ad4e6e88bb29 2024-12-02T14:09:49,864 DEBUG [M:0;a3a61c9ba14f:45661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b0f2ca4fe3d34e3db5ce2d787a64699c is 69, key is a3a61c9ba14f,39167,1733148494949/rs:state/1733148495850/Put/seqid=0 2024-12-02T14:09:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741857_1033 (size=5156) 2024-12-02T14:09:49,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741857_1033 (size=5156) 2024-12-02T14:09:49,871 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b0f2ca4fe3d34e3db5ce2d787a64699c 2024-12-02T14:09:49,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:49,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39167-0x1009b42a3c00001, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:49,875 INFO [RS:0;a3a61c9ba14f:39167 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:09:49,875 INFO [RS:0;a3a61c9ba14f:39167 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,39167,1733148494949; zookeeper connection closed. 2024-12-02T14:09:49,875 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5ecc829c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5ecc829c 2024-12-02T14:09:49,876 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T14:09:49,893 DEBUG [M:0;a3a61c9ba14f:45661 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fe6ee45eb9934a148c41f6c7ddcf4710 is 52, key is load_balancer_on/state:d/1733148496824/Put/seqid=0 2024-12-02T14:09:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741858_1034 (size=5056) 2024-12-02T14:09:49,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741858_1034 (size=5056) 2024-12-02T14:09:49,899 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fe6ee45eb9934a148c41f6c7ddcf4710 2024-12-02T14:09:49,906 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ee006499e374a7280ab16ea506875ef as 
hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1ee006499e374a7280ab16ea506875ef 2024-12-02T14:09:49,913 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1ee006499e374a7280ab16ea506875ef, entries=8, sequenceid=59, filesize=5.5 K 2024-12-02T14:09:49,915 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1c4334b715ba4e58a183ad4e6e88bb29 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c4334b715ba4e58a183ad4e6e88bb29 2024-12-02T14:09:49,922 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1c4334b715ba4e58a183ad4e6e88bb29 2024-12-02T14:09:49,922 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1c4334b715ba4e58a183ad4e6e88bb29, entries=6, sequenceid=59, filesize=6.1 K 2024-12-02T14:09:49,923 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b0f2ca4fe3d34e3db5ce2d787a64699c as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b0f2ca4fe3d34e3db5ce2d787a64699c 2024-12-02T14:09:49,928 INFO [regionserver/a3a61c9ba14f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:09:49,931 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b0f2ca4fe3d34e3db5ce2d787a64699c, entries=1, sequenceid=59, filesize=5.0 K 2024-12-02T14:09:49,933 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/fe6ee45eb9934a148c41f6c7ddcf4710 as hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fe6ee45eb9934a148c41f6c7ddcf4710 2024-12-02T14:09:49,939 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/fe6ee45eb9934a148c41f6c7ddcf4710, entries=1, sequenceid=59, filesize=4.9 K 2024-12-02T14:09:49,941 INFO [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=59, compaction requested=false 2024-12-02T14:09:49,942 INFO 
[M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:49,942 DEBUG [M:0;a3a61c9ba14f:45661 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148589789Disabling compacts and flushes for region at 1733148589789Disabling writes for close at 1733148589789Obtaining lock to block concurrent updates at 1733148589790 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733148589790Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733148589790Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733148589791 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733148589791Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733148589807 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733148589807Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733148589820 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733148589836 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733148589836Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733148589849 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733148589863 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733148589863Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733148589878 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733148589892 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733148589892Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cc03057: reopening flushed file at 1733148589905 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66b5d8ab: reopening flushed file at 1733148589914 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a21af2c: reopening flushed file at 1733148589922 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76f21dc8: reopening flushed file at 1733148589931 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=59, compaction requested=false at 1733148589941 (+10 ms)Writing region close event to WAL at 1733148589942 (+1 ms)Closed at 1733148589942 2024-12-02T14:09:49,943 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,943 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,943 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,944 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,944 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:49,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37629 is added to blk_1073741830_1006 (size=27973) 2024-12-02T14:09:49,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45893 is added to blk_1073741830_1006 (size=27973) 2024-12-02T14:09:49,947 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): 
LogRoller exiting. 2024-12-02T14:09:49,947 INFO [M:0;a3a61c9ba14f:45661 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T14:09:49,947 INFO [M:0;a3a61c9ba14f:45661 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45661 2024-12-02T14:09:49,947 INFO [M:0;a3a61c9ba14f:45661 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:09:50,049 INFO [M:0;a3a61c9ba14f:45661 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:09:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45661-0x1009b42a3c00000, quorum=127.0.0.1:52500, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:50,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:50,060 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:09:50,060 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:09:50,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:09:50,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir/,STOPPED} 2024-12-02T14:09:50,064 WARN [BP-207308689-172.17.0.2-1733148492023 heartbeating to localhost/127.0.0.1:42389 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:09:50,064 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:09:50,064 WARN [BP-207308689-172.17.0.2-1733148492023 heartbeating to localhost/127.0.0.1:42389 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-207308689-172.17.0.2-1733148492023 (Datanode Uuid 0120331a-e668-4d65-b2c8-9a2f2cc54cb9) service to localhost/127.0.0.1:42389 2024-12-02T14:09:50,064 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:09:50,065 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data3/current/BP-207308689-172.17.0.2-1733148492023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:50,065 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data4/current/BP-207308689-172.17.0.2-1733148492023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:50,066 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:09:50,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:50,068 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:09:50,068 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:09:50,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:09:50,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir/,STOPPED} 2024-12-02T14:09:50,070 WARN [BP-207308689-172.17.0.2-1733148492023 heartbeating to localhost/127.0.0.1:42389 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:09:50,070 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:09:50,070 WARN [BP-207308689-172.17.0.2-1733148492023 heartbeating to localhost/127.0.0.1:42389 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-207308689-172.17.0.2-1733148492023 (Datanode Uuid dbf527d8-df80-4746-bc2c-10385216274e) service to localhost/127.0.0.1:42389 2024-12-02T14:09:50,070 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:09:50,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data1/current/BP-207308689-172.17.0.2-1733148492023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:50,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/cluster_c7473284-84e7-c5cd-0f7b-2a3e05e24376/data/data2/current/BP-207308689-172.17.0.2-1733148492023 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:50,071 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:09:50,080 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:09:50,080 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:09:50,080 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:09:50,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:09:50,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir/,STOPPED} 2024-12-02T14:09:50,090 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T14:09:50,124 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T14:09:50,132 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:42389 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/a3a61c9ba14f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42389 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging 
thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42389 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42389 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/a3a61c9ba14f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42389 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/a3a61c9ba14f:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@18e1792d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=49 (was 140), ProcessCount=11 (was 11), AvailableMemoryMB=6749 (was 7304) 2024-12-02T14:09:50,138 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=49, ProcessCount=11, AvailableMemoryMB=6747 2024-12-02T14:09:50,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T14:09:50,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.log.dir so I do NOT create it in target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650 2024-12-02T14:09:50,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbc59b46-a676-faa8-c37a-8aceb0e9b8d1/hadoop.tmp.dir so I do NOT create it in target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650 2024-12-02T14:09:50,138 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980, deleteOnExit=true 2024-12-02T14:09:50,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/test.cache.data in system properties and HBase conf 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir in system properties and HBase conf 2024-12-02T14:09:50,139 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T14:09:50,139 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:09:50,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/dfs.journalnode.edits.dir in system properties and HBase conf 
2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/nfs.dump.dir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/java.io.tmpdir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T14:09:50,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T14:09:50,154 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:09:50,207 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:50,212 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:09:50,214 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:09:50,214 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:09:50,214 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:09:50,215 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:50,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd9c5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:09:50,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3c3ceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:09:50,308 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52b0c086{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/java.io.tmpdir/jetty-localhost-43955-hadoop-hdfs-3_4_1-tests_jar-_-any-1104309762318395234/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:09:50,309 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d9de743{HTTP/1.1, (http/1.1)}{localhost:43955} 2024-12-02T14:09:50,309 INFO [Time-limited test {}] server.Server(415): Started @99972ms 2024-12-02T14:09:50,321 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:09:50,369 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:50,373 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:09:50,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:09:50,374 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:09:50,374 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:09:50,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55f7876e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:09:50,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@607b9bc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:09:50,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30a1c2a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/java.io.tmpdir/jetty-localhost-41033-hadoop-hdfs-3_4_1-tests_jar-_-any-14056152801824559601/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:50,466 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4620cd8a{HTTP/1.1, (http/1.1)}{localhost:41033} 2024-12-02T14:09:50,466 INFO [Time-limited test {}] server.Server(415): Started @100128ms 2024-12-02T14:09:50,467 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:09:50,500 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:50,505 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:09:50,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:09:50,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:09:50,507 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:09:50,507 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@463a48f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:09:50,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d944f53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:09:50,531 WARN [Thread-440 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data1/current/BP-545261947-172.17.0.2-1733148590165/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:50,531 WARN [Thread-441 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data2/current/BP-545261947-172.17.0.2-1733148590165/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:50,551 WARN [Thread-419 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:09:50,554 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45c555425484e894 with lease ID 0xa8613957c8a72eee: Processing first storage report for DS-5a03eed3-face-4c2d-b5e3-82dba4618b4c from datanode DatanodeRegistration(127.0.0.1:32967, datanodeUuid=a26a0eea-8d7d-43ef-b904-1478eec4c2be, infoPort=39741, infoSecurePort=0, ipcPort=46603, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165) 2024-12-02T14:09:50,554 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45c555425484e894 with lease ID 0xa8613957c8a72eee: from storage DS-5a03eed3-face-4c2d-b5e3-82dba4618b4c node DatanodeRegistration(127.0.0.1:32967, datanodeUuid=a26a0eea-8d7d-43ef-b904-1478eec4c2be, infoPort=39741, infoSecurePort=0, ipcPort=46603, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:09:50,554 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x45c555425484e894 with lease ID 0xa8613957c8a72eee: Processing first storage report for DS-095d00ab-4d05-436d-8949-ddca70bc63a2 from datanode DatanodeRegistration(127.0.0.1:32967, datanodeUuid=a26a0eea-8d7d-43ef-b904-1478eec4c2be, infoPort=39741, infoSecurePort=0, ipcPort=46603, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165) 2024-12-02T14:09:50,554 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x45c555425484e894 with lease ID 0xa8613957c8a72eee: from storage DS-095d00ab-4d05-436d-8949-ddca70bc63a2 node DatanodeRegistration(127.0.0.1:32967, datanodeUuid=a26a0eea-8d7d-43ef-b904-1478eec4c2be, infoPort=39741, infoSecurePort=0, ipcPort=46603, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:09:50,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c708570{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/java.io.tmpdir/jetty-localhost-39375-hadoop-hdfs-3_4_1-tests_jar-_-any-13201868271705367454/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:50,606 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ab06e68{HTTP/1.1, (http/1.1)}{localhost:39375} 2024-12-02T14:09:50,606 INFO [Time-limited test {}] server.Server(415): Started @100269ms 2024-12-02T14:09:50,608 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
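The datanode registrations and per-datanode Jetty contexts logged above come from the embedded HDFS mini cluster that the HBase harness wraps. The sketch below starts that building block directly with two datanodes, matching the two DatanodeRegistration entries; the base directory and class name are illustrative assumptions, the `MiniDFSCluster` builder API is standard Hadoop test code.

```java
// Sketch: the two-datanode HDFS mini cluster behind the block-report entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch"); // hypothetical location
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)        // two datanodes, as in the logged run
        .build();
    cluster.waitActive();       // wait until both datanodes have reported their storages
    try (FileSystem fs = cluster.getFileSystem()) {
      fs.mkdirs(new Path("/user/jenkins"));
      System.out.println("NameNode at " + fs.getUri());
    } finally {
      cluster.shutdown();
    }
  }
}
```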
2024-12-02T14:09:50,664 WARN [Thread-466 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data3/current/BP-545261947-172.17.0.2-1733148590165/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:50,665 WARN [Thread-467 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data4/current/BP-545261947-172.17.0.2-1733148590165/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:50,681 WARN [Thread-455 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:09:50,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8db772ce7d553f5 with lease ID 0xa8613957c8a72eef: Processing first storage report for DS-08ebda77-3ccf-40ff-bbb8-2ad5d6ddc520 from datanode DatanodeRegistration(127.0.0.1:44313, datanodeUuid=ea5a8e05-35d5-4712-8fc8-1a30a2684b1d, infoPort=42633, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165) 2024-12-02T14:09:50,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8db772ce7d553f5 with lease ID 0xa8613957c8a72eef: from storage DS-08ebda77-3ccf-40ff-bbb8-2ad5d6ddc520 node DatanodeRegistration(127.0.0.1:44313, datanodeUuid=ea5a8e05-35d5-4712-8fc8-1a30a2684b1d, infoPort=42633, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:09:50,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc8db772ce7d553f5 with lease ID 0xa8613957c8a72eef: Processing first storage report for DS-b0cf8dcc-0d65-47f8-92a8-2dd563d29f8f from datanode DatanodeRegistration(127.0.0.1:44313, datanodeUuid=ea5a8e05-35d5-4712-8fc8-1a30a2684b1d, infoPort=42633, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165) 2024-12-02T14:09:50,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc8db772ce7d553f5 with lease ID 0xa8613957c8a72eef: from storage DS-b0cf8dcc-0d65-47f8-92a8-2dd563d29f8f node DatanodeRegistration(127.0.0.1:44313, datanodeUuid=ea5a8e05-35d5-4712-8fc8-1a30a2684b1d, infoPort=42633, infoSecurePort=0, ipcPort=36321, storageInfo=lv=-57;cid=testClusterID;nsid=2099365708;c=1733148590165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:09:50,732 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650 2024-12-02T14:09:50,734 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/zookeeper_0, clientPort=49223, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T14:09:50,735 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49223 2024-12-02T14:09:50,735 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:50,737 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:50,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:09:50,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:09:50,749 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d with version=8 2024-12-02T14:09:50,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase-staging 2024-12-02T14:09:50,752 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:09:50,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:50,752 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:50,753 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:09:50,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:50,753 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:09:50,753 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T14:09:50,753 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:09:50,754 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41009 2024-12-02T14:09:50,756 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41009 connecting to ZooKeeper ensemble=127.0.0.1:49223 2024-12-02T14:09:50,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:410090x0, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:09:50,760 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41009-0x1009b441efc0000 connected 2024-12-02T14:09:50,772 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:50,774 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:50,778 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:50,778 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d, hbase.cluster.distributed=false 2024-12-02T14:09:50,780 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:09:50,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41009 2024-12-02T14:09:50,781 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41009 2024-12-02T14:09:50,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41009 2024-12-02T14:09:50,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41009 2024-12-02T14:09:50,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41009 2024-12-02T14:09:50,798 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:09:50,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:50,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:50,798 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:09:50,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:50,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:09:50,798 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:09:50,798 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:09:50,799 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43055 2024-12-02T14:09:50,800 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43055 connecting to ZooKeeper ensemble=127.0.0.1:49223 2024-12-02T14:09:50,801 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:50,803 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:50,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:430550x0, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:09:50,807 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:430550x0, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:50,807 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43055-0x1009b441efc0001 connected 2024-12-02T14:09:50,808 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:09:50,808 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:09:50,809 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:09:50,811 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:09:50,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43055 2024-12-02T14:09:50,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43055 2024-12-02T14:09:50,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43055 2024-12-02T14:09:50,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43055 2024-12-02T14:09:50,821 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43055 2024-12-02T14:09:50,833 
DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a3a61c9ba14f:41009 2024-12-02T14:09:50,833 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:50,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:50,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:50,835 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:50,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:09:50,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,837 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:09:50,837 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a3a61c9ba14f,41009,1733148590752 from backup master directory 2024-12-02T14:09:50,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:50,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:50,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:50,838 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
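The ZKWatcher and ActiveMasterManager entries above show the master creating its znode under /hbase/backup-masters, watching /hbase/master, and deleting the backup entry once it wins election. The following sketch merely observes those znodes with a plain ZooKeeper client against the mini quorum (client port 49223 in this run); it is an inspection aid, not part of the election code, and the class name is made up.

```java
// Sketch: inspecting the master-election znodes referenced in the log above.
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkInspectSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:49223", 30000, (WatchedEvent e) -> {
      if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // /hbase/master holds the active master; /hbase/backup-masters lists standbys.
    System.out.println("active master znode: " + zk.exists("/hbase/master", false));
    List<String> backups = zk.getChildren("/hbase/backup-masters", false);
    System.out.println("backup masters: " + backups);
    zk.close();
  }
}
```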
2024-12-02T14:09:50,838 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:50,844 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/hbase.id] with ID: fec2fced-bd64-4f18-8215-7044e6243273 2024-12-02T14:09:50,844 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/.tmp/hbase.id 2024-12-02T14:09:50,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:09:50,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:09:50,852 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/.tmp/hbase.id]:[hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/hbase.id] 2024-12-02T14:09:50,868 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:50,868 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T14:09:50,870 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
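The cluster ID entries above follow a write-to-a-temporary-path-then-rename pattern, so the final hbase.id file only ever appears fully written. The sketch below shows that pattern with the plain Hadoop FileSystem API; the paths and class name are illustrative, and this is the general pattern rather than the FSUtils code itself.

```java
// Sketch: write-then-rename, as used for the hbase.id file in the log above.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AtomicWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/user/jenkins/.tmp/hbase.id");   // temporary location
    Path dst = new Path("/user/jenkins/hbase.id");        // final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("fec2fced-bd64-4f18-8215-7044e6243273".getBytes(StandardCharsets.UTF_8));
    }
    // On HDFS the rename is a single metadata operation, so readers never see
    // a half-written cluster ID file.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("could not move " + tmp + " to " + dst);
    }
  }
}
```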
2024-12-02T14:09:50,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:09:50,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:09:50,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:09:50,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T14:09:50,883 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:09:50,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:09:50,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:09:50,893 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store 2024-12-02T14:09:50,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:09:50,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:09:50,902 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:50,902 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:09:50,902 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:50,902 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:50,903 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:09:50,903 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:50,903 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
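The descriptor logged above for 'master:store' (families info, proc, rs and state, with their VERSIONS, BLOOMFILTER, IN_MEMORY and BLOCKSIZE settings) can be expressed with the public client-side builder API. The sketch below reproduces the info and proc families only, as an illustration of the descriptor shape; it is not the code HMaster itself runs, and the class name is made up.

```java
// Sketch: building a table descriptor shaped like the 'master:store' one logged above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)                                   // 8 KB, as logged
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBlocksize(65536)                                  // 64 KB, as logged
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}
```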
2024-12-02T14:09:50,903 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148590902Disabling compacts and flushes for region at 1733148590902Disabling writes for close at 1733148590903 (+1 ms)Writing region close event to WAL at 1733148590903Closed at 1733148590903 2024-12-02T14:09:50,904 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/.initializing 2024-12-02T14:09:50,904 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/WALs/a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:50,907 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C41009%2C1733148590752, suffix=, logDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/WALs/a3a61c9ba14f,41009,1733148590752, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/oldWALs, maxLogs=10 2024-12-02T14:09:50,907 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C41009%2C1733148590752.1733148590907 2024-12-02T14:09:50,913 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/WALs/a3a61c9ba14f,41009,1733148590752/a3a61c9ba14f%2C41009%2C1733148590752.1733148590907 2024-12-02T14:09:50,917 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39741:39741),(127.0.0.1/127.0.0.1:42633:42633)] 2024-12-02T14:09:50,918 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:09:50,918 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:50,918 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,918 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T14:09:50,922 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:50,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:50,923 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T14:09:50,925 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:50,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:09:50,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,928 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:09:50,929 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:50,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:09:50,930 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,931 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:09:50,931 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:50,932 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:09:50,932 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,933 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,933 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,935 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,935 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,935 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:09:50,936 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:50,939 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:09:50,939 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689622, jitterRate=-0.12310130894184113}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:09:50,940 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733148590919Initializing all the Stores at 1733148590920 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148590920Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148590920Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148590920Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148590920Cleaning up temporary data from old regions at 1733148590935 (+15 ms)Region opened successfully at 1733148590940 (+5 ms) 2024-12-02T14:09:50,941 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:09:50,945 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71f02a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:09:50,946 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T14:09:50,946 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:09:50,946 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:09:50,946 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:09:50,947 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T14:09:50,947 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T14:09:50,948 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:09:50,951 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:09:50,952 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:09:50,953 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:09:50,953 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:09:50,954 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:09:50,955 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:09:50,955 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:09:50,956 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:09:50,957 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:09:50,958 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T14:09:50,958 DEBUG 
[master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:09:50,960 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:09:50,961 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:09:50,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:50,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:50,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,963 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a3a61c9ba14f,41009,1733148590752, sessionid=0x1009b441efc0000, setting cluster-up flag (Was=false) 2024-12-02T14:09:50,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,969 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:09:50,970 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:50,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:50,976 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T14:09:50,977 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:50,979 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:09:50,981 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:50,981 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:09:50,981 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T14:09:50,981 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a3a61c9ba14f,41009,1733148590752 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a3a61c9ba14f:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:09:50,983 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T14:09:50,985 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733148620985 2024-12-02T14:09:50,985 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:09:50,986 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:09:50,987 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:09:50,987 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:50,987 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:09:50,987 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:09:50,987 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:09:50,987 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148590987,5,FailOnTimeoutGroup] 2024-12-02T14:09:50,987 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148590987,5,FailOnTimeoutGroup] 2024-12-02T14:09:50,987 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:50,988 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:09:50,988 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:50,988 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:50,988 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:50,988 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:09:50,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:09:50,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:09:50,996 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:09:50,996 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d 2024-12-02T14:09:51,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:09:51,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:09:51,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:51,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:09:51,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:09:51,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:51,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:09:51,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:09:51,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:51,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:09:51,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:09:51,017 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:51,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:09:51,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:09:51,019 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:51,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:09:51,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740 2024-12-02T14:09:51,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740 2024-12-02T14:09:51,023 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(746): ClusterId : fec2fced-bd64-4f18-8215-7044e6243273 2024-12-02T14:09:51,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:09:51,023 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:09:51,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:09:51,024 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T14:09:51,025 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:09:51,025 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:09:51,026 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:09:51,027 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:09:51,027 DEBUG [RS:0;a3a61c9ba14f:43055 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc1f0d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:09:51,029 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:09:51,029 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803319, jitterRate=0.021473243832588196}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:09:51,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733148591007Initializing all the Stores at 1733148591008 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148591008Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148591010 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148591010Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148591010Cleaning up temporary data from old regions at 1733148591023 (+13 ms)Region opened successfully at 1733148591030 (+7 ms) 2024-12-02T14:09:51,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:09:51,030 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-12-02T14:09:51,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:09:51,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:09:51,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:09:51,031 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:09:51,031 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148591030Disabling compacts and flushes for region at 1733148591030Disabling writes for close at 1733148591030Writing region close event to WAL at 1733148591031 (+1 ms)Closed at 1733148591031 2024-12-02T14:09:51,032 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:51,032 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:09:51,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:09:51,034 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:09:51,036 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:09:51,039 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a3a61c9ba14f:43055 2024-12-02T14:09:51,040 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:09:51,040 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:09:51,040 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T14:09:51,040 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,41009,1733148590752 with port=43055, startcode=1733148590798 2024-12-02T14:09:51,041 DEBUG [RS:0;a3a61c9ba14f:43055 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:09:51,043 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46843, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:09:51,044 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41009 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,044 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41009 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,047 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d 2024-12-02T14:09:51,047 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33353 2024-12-02T14:09:51,047 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:09:51,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:09:51,049 DEBUG [RS:0;a3a61c9ba14f:43055 {}] zookeeper.ZKUtil(111): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,049 WARN [RS:0;a3a61c9ba14f:43055 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:09:51,049 INFO [RS:0;a3a61c9ba14f:43055 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:09:51,049 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/WALs/a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,050 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,43055,1733148590798] 2024-12-02T14:09:51,053 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:09:51,056 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:09:51,056 INFO [RS:0;a3a61c9ba14f:43055 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:09:51,056 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T14:09:51,057 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:09:51,058 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:09:51,058 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,058 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,058 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,058 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,058 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,058 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,058 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:09:51,058 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,059 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,059 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,059 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,059 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,059 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:51,059 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:09:51,059 DEBUG [RS:0;a3a61c9ba14f:43055 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:09:51,059 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T14:09:51,059 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,059 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,060 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,060 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,060 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43055,1733148590798-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:09:51,073 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:09:51,073 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43055,1733148590798-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,074 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,074 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.Replication(171): a3a61c9ba14f,43055,1733148590798 started 2024-12-02T14:09:51,086 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,086 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,43055,1733148590798, RpcServer on a3a61c9ba14f/172.17.0.2:43055, sessionid=0x1009b441efc0001 2024-12-02T14:09:51,086 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:09:51,086 DEBUG [RS:0;a3a61c9ba14f:43055 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,086 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,43055,1733148590798' 2024-12-02T14:09:51,086 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:09:51,087 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:09:51,088 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:09:51,088 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:09:51,088 DEBUG [RS:0;a3a61c9ba14f:43055 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,088 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,43055,1733148590798' 2024-12-02T14:09:51,088 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:09:51,088 DEBUG 
[RS:0;a3a61c9ba14f:43055 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:09:51,089 DEBUG [RS:0;a3a61c9ba14f:43055 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:09:51,089 INFO [RS:0;a3a61c9ba14f:43055 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:09:51,089 INFO [RS:0;a3a61c9ba14f:43055 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:09:51,186 WARN [a3a61c9ba14f:41009 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:09:51,195 INFO [RS:0;a3a61c9ba14f:43055 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C43055%2C1733148590798, suffix=, logDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/WALs/a3a61c9ba14f,43055,1733148590798, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/oldWALs, maxLogs=32 2024-12-02T14:09:51,201 INFO [RS:0;a3a61c9ba14f:43055 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C43055%2C1733148590798.1733148591200 2024-12-02T14:09:51,208 INFO [RS:0;a3a61c9ba14f:43055 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/WALs/a3a61c9ba14f,43055,1733148590798/a3a61c9ba14f%2C43055%2C1733148590798.1733148591200 2024-12-02T14:09:51,209 DEBUG [RS:0;a3a61c9ba14f:43055 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:39741:39741)] 2024-12-02T14:09:51,437 DEBUG [a3a61c9ba14f:41009 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T14:09:51,438 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,442 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,43055,1733148590798, state=OPENING 2024-12-02T14:09:51,445 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:09:51,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:51,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:51,449 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:09:51,449 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:51,449 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:51,449 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,43055,1733148590798}] 2024-12-02T14:09:51,605 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:09:51,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33471, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:09:51,617 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:09:51,618 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:09:51,620 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C43055%2C1733148590798.meta, suffix=.meta, logDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/WALs/a3a61c9ba14f,43055,1733148590798, archiveDir=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/oldWALs, maxLogs=32 2024-12-02T14:09:51,622 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C43055%2C1733148590798.meta.1733148591622.meta 2024-12-02T14:09:51,629 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/WALs/a3a61c9ba14f,43055,1733148590798/a3a61c9ba14f%2C43055%2C1733148590798.meta.1733148591622.meta 2024-12-02T14:09:51,631 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42633:42633),(127.0.0.1/127.0.0.1:39741:39741)] 2024-12-02T14:09:51,632 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:09:51,632 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:09:51,632 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T14:09:51,632 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T14:09:51,633 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:09:51,633 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:51,633 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:09:51,633 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:09:51,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:09:51,636 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:09:51,636 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:51,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:09:51,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:09:51,638 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:51,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:09:51,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:09:51,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:51,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:09:51,642 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:09:51,642 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:51,642 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T14:09:51,642 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:09:51,643 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740 2024-12-02T14:09:51,644 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740 2024-12-02T14:09:51,646 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:09:51,646 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:09:51,647 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:09:51,648 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:09:51,649 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700669, jitterRate=-0.10905437171459198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:09:51,649 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:09:51,650 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733148591633Writing region info on filesystem at 1733148591633Initializing all the Stores at 1733148591634 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148591634Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148591635 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148591635Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148591635Cleaning up temporary data from old regions at 1733148591646 (+11 ms)Running coprocessor post-open hooks at 1733148591649 (+3 ms)Region opened successfully at 1733148591650 (+1 ms) 2024-12-02T14:09:51,651 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733148591604 2024-12-02T14:09:51,654 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:09:51,654 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:09:51,655 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,656 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,43055,1733148590798, state=OPEN 2024-12-02T14:09:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:09:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:09:51,658 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,658 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:51,658 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:51,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:09:51,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,43055,1733148590798 in 209 msec 2024-12-02T14:09:51,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T14:09:51,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 630 msec 2024-12-02T14:09:51,666 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:51,666 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:09:51,668 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:09:51,668 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,43055,1733148590798, seqNum=-1] 2024-12-02T14:09:51,668 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:09:51,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:09:51,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 696 msec 2024-12-02T14:09:51,678 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733148591677, completionTime=-1 2024-12-02T14:09:51,678 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T14:09:51,678 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733148651680 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733148711680 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,41009,1733148590752-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,41009,1733148590752-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,41009,1733148590752-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a3a61c9ba14f:41009, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:09:51,680 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,681 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:51,682 DEBUG [master/a3a61c9ba14f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T14:09:51,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.847sec 2024-12-02T14:09:51,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T14:09:51,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T14:09:51,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T14:09:51,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T14:09:51,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T14:09:51,686 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,41009,1733148590752-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:09:51,686 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,41009,1733148590752-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T14:09:51,688 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T14:09:51,688 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T14:09:51,688 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,41009,1733148590752-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:09:51,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cdd2fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:09:51,731 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a3a61c9ba14f,41009,-1 for getting cluster id 2024-12-02T14:09:51,732 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:09:51,733 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fec2fced-bd64-4f18-8215-7044e6243273' 2024-12-02T14:09:51,734 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:09:51,734 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fec2fced-bd64-4f18-8215-7044e6243273" 2024-12-02T14:09:51,734 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66b4ece3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:09:51,734 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a3a61c9ba14f,41009,-1] 2024-12-02T14:09:51,735 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:09:51,735 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:51,737 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47066, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:09:51,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4867bbb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:09:51,738 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:09:51,739 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,43055,1733148590798, seqNum=-1] 2024-12-02T14:09:51,740 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:09:51,742 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35252, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:09:51,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:51,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:51,748 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T14:09:51,748 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T14:09:51,748 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T14:09:51,748 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:09:51,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:51,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:51,749 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T14:09:51,749 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T14:09:51,749 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=11609354, stopped=false 2024-12-02T14:09:51,749 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a3a61c9ba14f,41009,1733148590752 2024-12-02T14:09:51,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:51,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:51,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:51,750 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:09:51,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:51,751 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T14:09:51,751 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:09:51,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:51,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:51,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:51,751 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,43055,1733148590798' ***** 2024-12-02T14:09:51,751 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T14:09:51,751 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T14:09:51,752 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a3a61c9ba14f:43055. 2024-12-02T14:09:51,752 DEBUG [RS:0;a3a61c9ba14f:43055 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:09:51,752 DEBUG [RS:0;a3a61c9ba14f:43055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T14:09:51,752 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:09:51,753 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T14:09:51,753 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T14:09:51,753 DEBUG [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-02T14:09:51,753 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:09:51,753 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:09:51,753 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:09:51,753 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:09:51,753 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:09:51,753 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-02T14:09:51,777 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740/.tmp/ns/ab2a15c5310c440bbd93a996977ec6a2 is 43, key is default/ns:d/1733148591671/Put/seqid=0 2024-12-02T14:09:51,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741835_1011 (size=5153) 2024-12-02T14:09:51,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741835_1011 (size=5153) 2024-12-02T14:09:51,784 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740/.tmp/ns/ab2a15c5310c440bbd93a996977ec6a2 2024-12-02T14:09:51,792 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740/.tmp/ns/ab2a15c5310c440bbd93a996977ec6a2 as hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740/ns/ab2a15c5310c440bbd93a996977ec6a2 2024-12-02T14:09:51,800 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740/ns/ab2a15c5310c440bbd93a996977ec6a2, entries=2, sequenceid=6, filesize=5.0 K 2024-12-02T14:09:51,802 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false 2024-12-02T14:09:51,808 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T14:09:51,808 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:09:51,809 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:09:51,809 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148591753Running coprocessor pre-close hooks at 1733148591753Disabling compacts and flushes for region at 1733148591753Disabling writes for close at 1733148591753Obtaining lock to block concurrent updates at 1733148591753Preparing flush snapshotting stores in 1588230740 at 1733148591753Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733148591754 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733148591755 (+1 ms)Flushing 1588230740/ns: creating writer at 1733148591755Flushing 1588230740/ns: appending metadata at 1733148591776 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1733148591776Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74868159: reopening flushed file at 1733148591791 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false at 1733148591802 (+11 ms)Writing region close event to WAL at 1733148591803 (+1 ms)Running coprocessor post-close hooks at 1733148591808 (+5 ms)Closed at 1733148591809 (+1 ms) 2024-12-02T14:09:51,809 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:09:51,953 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,43055,1733148590798; all regions closed. 
2024-12-02T14:09:51,954 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,954 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,954 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,954 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,954 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741834_1010 (size=1152) 2024-12-02T14:09:51,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741834_1010 (size=1152) 2024-12-02T14:09:51,961 DEBUG [RS:0;a3a61c9ba14f:43055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/oldWALs 2024-12-02T14:09:51,961 INFO [RS:0;a3a61c9ba14f:43055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C43055%2C1733148590798.meta:.meta(num 1733148591622) 2024-12-02T14:09:51,962 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,962 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,962 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,962 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,962 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741833_1009 (size=93) 2024-12-02T14:09:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741833_1009 (size=93) 2024-12-02T14:09:51,968 DEBUG [RS:0;a3a61c9ba14f:43055 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/oldWALs 2024-12-02T14:09:51,968 INFO [RS:0;a3a61c9ba14f:43055 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C43055%2C1733148590798:(num 1733148591200) 2024-12-02T14:09:51,969 DEBUG [RS:0;a3a61c9ba14f:43055 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:51,969 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:09:51,969 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:09:51,969 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:09:51,969 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:09:51,969 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T14:09:51,969 INFO [RS:0;a3a61c9ba14f:43055 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43055 2024-12-02T14:09:51,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:09:51,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,43055,1733148590798 2024-12-02T14:09:51,971 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:09:51,972 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,43055,1733148590798] 2024-12-02T14:09:51,973 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,43055,1733148590798 already deleted, retry=false 2024-12-02T14:09:51,973 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,43055,1733148590798 expired; onlineServers=0 2024-12-02T14:09:51,973 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a3a61c9ba14f,41009,1733148590752' ***** 2024-12-02T14:09:51,973 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T14:09:51,973 INFO [M:0;a3a61c9ba14f:41009 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:09:51,974 INFO [M:0;a3a61c9ba14f:41009 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:09:51,974 DEBUG [M:0;a3a61c9ba14f:41009 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T14:09:51,974 DEBUG [M:0;a3a61c9ba14f:41009 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:09:51,974 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T14:09:51,974 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148590987 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148590987,5,FailOnTimeoutGroup] 2024-12-02T14:09:51,974 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148590987 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148590987,5,FailOnTimeoutGroup] 2024-12-02T14:09:51,974 INFO [M:0;a3a61c9ba14f:41009 {}] hbase.ChoreService(370): Chore service for: master/a3a61c9ba14f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:09:51,974 INFO [M:0;a3a61c9ba14f:41009 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:09:51,974 DEBUG [M:0;a3a61c9ba14f:41009 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:09:51,974 INFO [M:0;a3a61c9ba14f:41009 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:09:51,974 INFO [M:0;a3a61c9ba14f:41009 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:09:51,975 INFO [M:0;a3a61c9ba14f:41009 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:09:51,975 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T14:09:51,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:09:51,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:51,975 DEBUG [M:0;a3a61c9ba14f:41009 {}] zookeeper.ZKUtil(347): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:09:51,975 WARN [M:0;a3a61c9ba14f:41009 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:09:51,976 INFO [M:0;a3a61c9ba14f:41009 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/.lastflushedseqids 2024-12-02T14:09:51,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741836_1012 (size=99) 2024-12-02T14:09:51,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741836_1012 (size=99) 2024-12-02T14:09:51,984 INFO [M:0;a3a61c9ba14f:41009 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:09:51,984 INFO [M:0;a3a61c9ba14f:41009 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:09:51,984 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:09:51,984 INFO [M:0;a3a61c9ba14f:41009 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:51,984 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:51,984 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:09:51,984 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:51,985 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-02T14:09:52,000 DEBUG [M:0;a3a61c9ba14f:41009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/318dd4790f8642739efb17a024844e4a is 82, key is hbase:meta,,1/info:regioninfo/1733148591655/Put/seqid=0 2024-12-02T14:09:52,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741837_1013 (size=5672) 2024-12-02T14:09:52,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741837_1013 (size=5672) 2024-12-02T14:09:52,006 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/318dd4790f8642739efb17a024844e4a 2024-12-02T14:09:52,026 DEBUG [M:0;a3a61c9ba14f:41009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/462b4cb893434a2d9f7f6931d25af995 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733148591676/Put/seqid=0 2024-12-02T14:09:52,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741838_1014 (size=5275) 2024-12-02T14:09:52,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741838_1014 (size=5275) 2024-12-02T14:09:52,034 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/462b4cb893434a2d9f7f6931d25af995 2024-12-02T14:09:52,054 DEBUG [M:0;a3a61c9ba14f:41009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9f9baed05ab94213ab48f028d2f929f7 is 69, key is a3a61c9ba14f,43055,1733148590798/rs:state/1733148591044/Put/seqid=0 2024-12-02T14:09:52,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741839_1015 (size=5156) 2024-12-02T14:09:52,059 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741839_1015 (size=5156) 2024-12-02T14:09:52,060 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9f9baed05ab94213ab48f028d2f929f7 2024-12-02T14:09:52,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:52,072 INFO [RS:0;a3a61c9ba14f:43055 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:09:52,073 INFO [RS:0;a3a61c9ba14f:43055 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,43055,1733148590798; zookeeper connection closed. 2024-12-02T14:09:52,073 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43055-0x1009b441efc0001, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:52,073 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6bc187f6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6bc187f6 2024-12-02T14:09:52,073 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T14:09:52,080 DEBUG [M:0;a3a61c9ba14f:41009 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5e438ae930964dd1b2ef00a9c6b8b6fb is 52, key is load_balancer_on/state:d/1733148591746/Put/seqid=0 2024-12-02T14:09:52,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741840_1016 (size=5056) 2024-12-02T14:09:52,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741840_1016 (size=5056) 2024-12-02T14:09:52,086 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5e438ae930964dd1b2ef00a9c6b8b6fb 2024-12-02T14:09:52,093 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/318dd4790f8642739efb17a024844e4a as hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/318dd4790f8642739efb17a024844e4a 2024-12-02T14:09:52,099 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/318dd4790f8642739efb17a024844e4a, entries=8, sequenceid=29, filesize=5.5 K 2024-12-02T14:09:52,101 DEBUG [M:0;a3a61c9ba14f:41009 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/462b4cb893434a2d9f7f6931d25af995 as hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/462b4cb893434a2d9f7f6931d25af995 2024-12-02T14:09:52,107 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/462b4cb893434a2d9f7f6931d25af995, entries=3, sequenceid=29, filesize=5.2 K 2024-12-02T14:09:52,109 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9f9baed05ab94213ab48f028d2f929f7 as hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9f9baed05ab94213ab48f028d2f929f7 2024-12-02T14:09:52,116 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9f9baed05ab94213ab48f028d2f929f7, entries=1, sequenceid=29, filesize=5.0 K 2024-12-02T14:09:52,117 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5e438ae930964dd1b2ef00a9c6b8b6fb as hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5e438ae930964dd1b2ef00a9c6b8b6fb 2024-12-02T14:09:52,124 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33353/user/jenkins/test-data/aa1c38a9-163e-3012-c137-b33db711bc7d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5e438ae930964dd1b2ef00a9c6b8b6fb, entries=1, sequenceid=29, filesize=4.9 K 2024-12-02T14:09:52,125 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false 2024-12-02T14:09:52,127 INFO [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:52,127 DEBUG [M:0;a3a61c9ba14f:41009 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148591984Disabling compacts and flushes for region at 1733148591984Disabling writes for close at 1733148591984Obtaining lock to block concurrent updates at 1733148591985 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733148591985Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733148591985Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733148591986 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733148591986Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733148591999 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733148592000 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733148592012 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733148592026 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733148592026Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733148592039 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733148592053 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733148592053Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733148592065 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733148592080 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733148592080Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a7e285c: reopening flushed file at 1733148592092 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@608a5e20: reopening flushed file at 1733148592100 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24648bd6: reopening flushed file at 1733148592108 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@be85e28: reopening flushed file at 1733148592116 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=29, compaction requested=false at 1733148592125 (+9 ms)Writing region close event to WAL at 1733148592127 (+2 ms)Closed at 1733148592127 2024-12-02T14:09:52,129 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:52,129 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:52,129 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:52,129 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:52,129 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:09:52,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44313 is added to blk_1073741830_1006 (size=10311) 2024-12-02T14:09:52,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32967 is added to blk_1073741830_1006 (size=10311) 2024-12-02T14:09:52,132 INFO [M:0;a3a61c9ba14f:41009 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T14:09:52,132 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T14:09:52,132 INFO [M:0;a3a61c9ba14f:41009 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41009 2024-12-02T14:09:52,133 INFO [M:0;a3a61c9ba14f:41009 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:09:52,234 INFO [M:0;a3a61c9ba14f:41009 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:09:52,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:52,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41009-0x1009b441efc0000, quorum=127.0.0.1:49223, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:09:52,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c708570{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:52,238 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ab06e68{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:09:52,238 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:09:52,238 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d944f53{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:09:52,239 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@463a48f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir/,STOPPED} 2024-12-02T14:09:52,240 WARN [BP-545261947-172.17.0.2-1733148590165 heartbeating to localhost/127.0.0.1:33353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:09:52,240 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:09:52,240 WARN [BP-545261947-172.17.0.2-1733148590165 heartbeating to localhost/127.0.0.1:33353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-545261947-172.17.0.2-1733148590165 (Datanode Uuid ea5a8e05-35d5-4712-8fc8-1a30a2684b1d) service to localhost/127.0.0.1:33353 2024-12-02T14:09:52,240 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:09:52,240 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data3/current/BP-545261947-172.17.0.2-1733148590165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:52,241 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data4/current/BP-545261947-172.17.0.2-1733148590165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:52,241 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:09:52,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30a1c2a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:52,243 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4620cd8a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:09:52,243 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:09:52,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@607b9bc6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:09:52,243 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55f7876e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir/,STOPPED} 2024-12-02T14:09:52,245 WARN [BP-545261947-172.17.0.2-1733148590165 heartbeating to localhost/127.0.0.1:33353 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:09:52,245 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:09:52,245 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:09:52,245 WARN [BP-545261947-172.17.0.2-1733148590165 heartbeating to localhost/127.0.0.1:33353 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-545261947-172.17.0.2-1733148590165 (Datanode Uuid a26a0eea-8d7d-43ef-b904-1478eec4c2be) service to localhost/127.0.0.1:33353 2024-12-02T14:09:52,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data1/current/BP-545261947-172.17.0.2-1733148590165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:52,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/cluster_f00d5ed4-71c3-0ac1-132a-8c2e1ba6a980/data/data2/current/BP-545261947-172.17.0.2-1733148590165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:09:52,246 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:09:52,252 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52b0c086{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:09:52,252 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d9de743{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:09:52,252 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:09:52,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3c3ceb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:09:52,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd9c5b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir/,STOPPED} 2024-12-02T14:09:52,258 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T14:09:52,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T14:09:52,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T14:09:52,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.log.dir so I do NOT create it in target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339 2024-12-02T14:09:52,274 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b579ffbc-1238-2c15-f747-ec29ba3f4650/hadoop.tmp.dir so I do NOT create it in target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339 2024-12-02T14:09:52,274 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565, deleteOnExit=true 2024-12-02T14:09:52,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T14:09:52,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/test.cache.data in system properties and HBase conf 2024-12-02T14:09:52,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir in system properties and HBase conf 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T14:09:52,275 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T14:09:52,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/nfs.dump.dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T14:09:52,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T14:09:52,288 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:09:52,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:52,342 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:09:52,345 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:09:52,345 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:09:52,345 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:09:52,346 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:52,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ad82de5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:09:52,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@477187fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:09:52,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@42a4a79c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir/jetty-localhost-41715-hadoop-hdfs-3_4_1-tests_jar-_-any-10449328816222162346/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:09:52,438 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@195100a{HTTP/1.1, (http/1.1)}{localhost:41715} 2024-12-02T14:09:52,438 INFO [Time-limited test {}] server.Server(415): Started @102100ms 2024-12-02T14:09:52,450 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:09:52,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:52,507 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:09:52,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:09:52,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:09:52,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:09:52,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:09:52,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:09:52,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72b840c3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir/jetty-localhost-46741-hadoop-hdfs-3_4_1-tests_jar-_-any-3187220645869107176/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:52,598 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:46741} 2024-12-02T14:09:52,598 INFO [Time-limited test {}] server.Server(415): Started @102261ms 2024-12-02T14:09:52,600 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:09:52,630 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:09:52,633 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:09:52,634 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:09:52,634 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:09:52,634 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:09:52,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55c8142a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:09:52,636 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78ab676e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:09:52,657 WARN [Thread-659 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data1/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:52,657 WARN [Thread-660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data2/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:52,681 WARN [Thread-638 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:09:52,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77937dad5c3735b3 with lease ID 0x8f6b40ac3caacf05: Processing first storage report for DS-b3ac9365-de98-4e21-a500-26ee96b24634 from datanode DatanodeRegistration(127.0.0.1:37515, datanodeUuid=9d35c68d-d059-4d65-a2cc-1e4d8b09039a, infoPort=37909, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:09:52,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77937dad5c3735b3 with lease ID 0x8f6b40ac3caacf05: from storage DS-b3ac9365-de98-4e21-a500-26ee96b24634 node DatanodeRegistration(127.0.0.1:37515, datanodeUuid=9d35c68d-d059-4d65-a2cc-1e4d8b09039a, infoPort=37909, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:09:52,684 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x77937dad5c3735b3 with lease ID 0x8f6b40ac3caacf05: Processing first storage report for DS-6f2262b5-858d-43fd-91bc-2e20ffdfb2ae from datanode DatanodeRegistration(127.0.0.1:37515, datanodeUuid=9d35c68d-d059-4d65-a2cc-1e4d8b09039a, infoPort=37909, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:09:52,684 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x77937dad5c3735b3 with lease ID 0x8f6b40ac3caacf05: from storage DS-6f2262b5-858d-43fd-91bc-2e20ffdfb2ae node DatanodeRegistration(127.0.0.1:37515, datanodeUuid=9d35c68d-d059-4d65-a2cc-1e4d8b09039a, infoPort=37909, infoSecurePort=0, ipcPort=36131, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:09:52,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bb5d847{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir/jetty-localhost-45645-hadoop-hdfs-3_4_1-tests_jar-_-any-9315230323092649020/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:09:52,737 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e5e2c7f{HTTP/1.1, (http/1.1)}{localhost:45645} 2024-12-02T14:09:52,737 INFO [Time-limited test {}] server.Server(415): Started @102399ms 2024-12-02T14:09:52,738 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
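
The restart captured in the entries above, from "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}" through the DFS, Jetty and datanode bring-up, is driven by HBaseTestingUtil. Below is a minimal sketch of how a test might issue that stop/start cycle. The class names (HBaseTestingUtil, StartMiniClusterOption) and option fields are taken from the log itself; the import locations, builder methods and exact signatures are assumptions and may differ between HBase branches.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterRestartSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option printed in the log: 1 master, 1 region server,
        // 2 data nodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up mini DFS, mini ZK and HBase
        try {
          // ... exercise the cluster here ...
        } finally {
          util.shutdownMiniCluster();    // produces the "Minicluster is down" entry
        }
      }
    }
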
2024-12-02T14:09:52,800 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data3/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:52,800 WARN [Thread-686 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data4/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:09:52,820 WARN [Thread-674 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:09:52,823 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d7492710274579e with lease ID 0x8f6b40ac3caacf06: Processing first storage report for DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c from datanode DatanodeRegistration(127.0.0.1:44871, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=34753, infoSecurePort=0, ipcPort=37221, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:09:52,823 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d7492710274579e with lease ID 0x8f6b40ac3caacf06: from storage DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c node DatanodeRegistration(127.0.0.1:44871, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=34753, infoSecurePort=0, ipcPort=37221, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:09:52,823 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8d7492710274579e with lease ID 0x8f6b40ac3caacf06: Processing first storage report for DS-9694b8e1-55bb-4022-bfe4-12f1205bf9ac from datanode DatanodeRegistration(127.0.0.1:44871, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=34753, infoSecurePort=0, ipcPort=37221, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:09:52,823 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8d7492710274579e with lease ID 0x8f6b40ac3caacf06: from storage DS-9694b8e1-55bb-4022-bfe4-12f1205bf9ac node DatanodeRegistration(127.0.0.1:44871, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=34753, infoSecurePort=0, ipcPort=37221, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T14:09:52,866 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339 2024-12-02T14:09:52,869 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/zookeeper_0, clientPort=64260, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T14:09:52,870 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64260 2024-12-02T14:09:52,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:52,872 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:52,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:09:52,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:09:52,886 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b with version=8 2024-12-02T14:09:52,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase-staging 2024-12-02T14:09:52,890 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:09:52,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:52,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:52,890 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:09:52,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:52,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:09:52,890 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T14:09:52,891 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:09:52,892 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37979 2024-12-02T14:09:52,893 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37979 connecting to ZooKeeper ensemble=127.0.0.1:64260 2024-12-02T14:09:52,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379790x0, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:09:52,898 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37979-0x1009b4427530000 connected 2024-12-02T14:09:52,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:52,916 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:52,918 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:52,919 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b, hbase.cluster.distributed=false 2024-12-02T14:09:52,920 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:09:52,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37979 2024-12-02T14:09:52,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37979 2024-12-02T14:09:52,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37979 2024-12-02T14:09:52,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37979 2024-12-02T14:09:52,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37979 2024-12-02T14:09:52,936 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:09:52,936 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:52,936 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:52,936 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:09:52,936 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:52,936 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:09:52,936 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:09:52,936 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:09:52,937 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32833 2024-12-02T14:09:52,938 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:32833 connecting to ZooKeeper ensemble=127.0.0.1:64260 2024-12-02T14:09:52,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:52,940 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:52,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:328330x0, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:09:52,944 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:09:52,944 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32833-0x1009b4427530001 connected 2024-12-02T14:09:52,945 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:09:52,945 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:09:52,946 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:09:52,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:09:52,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32833 2024-12-02T14:09:52,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32833 2024-12-02T14:09:52,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32833 2024-12-02T14:09:52,954 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32833 2024-12-02T14:09:52,956 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32833 
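
Both the master (bound to 172.17.0.2:37979) and the region server (bound to 172.17.0.2:32833) in the entries above register with the ZooKeeper ensemble at 127.0.0.1:64260 that MiniZooKeeperCluster exposed earlier. A hedged sketch of how an external client could point at that same quorum with the standard HBase client API follows; the quorum address and client port are copied from the log, everything else (class name, the metrics call used as a probe) is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Values as logged by MiniZooKeeperCluster: localhost ensemble, client port 64260.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 64260);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The active master registered in the log above should answer this call.
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }
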
2024-12-02T14:09:52,967 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a3a61c9ba14f:37979 2024-12-02T14:09:52,967 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:52,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:52,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:52,969 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:52,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:09:52,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:52,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:52,970 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:09:52,971 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a3a61c9ba14f,37979,1733148592890 from backup master directory 2024-12-02T14:09:52,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:52,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:52,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:09:52,972 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T14:09:52,972 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:52,977 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/hbase.id] with ID: f18dd18c-ca9c-4351-9d4a-8b544b06575f 2024-12-02T14:09:52,977 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/.tmp/hbase.id 2024-12-02T14:09:52,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:09:52,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:09:52,988 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/.tmp/hbase.id]:[hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/hbase.id] 2024-12-02T14:09:53,002 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:53,002 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T14:09:53,003 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
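
The FSUtils entries above record the cluster ID being written to a temporary file under .tmp and then moved into place as hbase.id, i.e. a write-then-rename so readers never observe a partially written file. A minimal sketch of that pattern with the plain Hadoop FileSystem API follows; the shortened paths and the wrapper class are illustrative, not the FSUtils implementation itself, and only the cluster ID value is taken from the log.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
        Path target = new Path("/user/jenkins/test-data/hbase.id");
        // Write the content to a temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("f18dd18c-ca9c-4351-9d4a-8b544b06575f".getBytes(StandardCharsets.UTF_8));
        }
        // ... then rename it to the final name so readers only ever see the complete file.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }
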
2024-12-02T14:09:53,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:09:53,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:09:53,015 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:09:53,016 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T14:09:53,017 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:09:53,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:09:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:09:53,027 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store 2024-12-02T14:09:53,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:09:53,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:09:53,039 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:53,039 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:09:53,039 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:53,039 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:53,039 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:09:53,039 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:09:53,039 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
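
The MasterRegion entries above create the internal 'master:store' region from a descriptor with four column families (info, proc, rs, state). That descriptor is assembled internally by the master, but an equivalent one could be expressed with the public client API. A hedged sketch follows, showing only the 'info' family with the attribute values copied from the logged descriptor; the other three families would follow the same shape with their logged settings.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family as logged: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter,
        // in-memory, 8 KB block size; 'proc', 'rs' and 'state' would be added the same way.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build())
            .build();
      }
    }
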
2024-12-02T14:09:53,039 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148593039Disabling compacts and flushes for region at 1733148593039Disabling writes for close at 1733148593039Writing region close event to WAL at 1733148593039Closed at 1733148593039 2024-12-02T14:09:53,040 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/.initializing 2024-12-02T14:09:53,040 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:53,043 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C37979%2C1733148592890, suffix=, logDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890, archiveDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/oldWALs, maxLogs=10 2024-12-02T14:09:53,044 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 2024-12-02T14:09:53,049 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 2024-12-02T14:09:53,053 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37909:37909),(127.0.0.1/127.0.0.1:34753:34753)] 2024-12-02T14:09:53,053 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:09:53,054 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:53,054 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,054 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,057 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T14:09:53,057 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T14:09:53,059 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:09:53,060 INFO [regionserver/a3a61c9ba14f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:09:53,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,061 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:09:53,062 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:09:53,062 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:09:53,063 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:09:53,064 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,065 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,066 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,067 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping 
wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,067 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,068 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:09:53,069 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:09:53,071 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:09:53,071 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840492, jitterRate=0.06874126195907593}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:09:53,072 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733148593054Initializing all the Stores at 1733148593055 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148593055Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148593055Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148593055Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148593055Cleaning up temporary data from old regions at 1733148593067 (+12 ms)Region opened successfully at 1733148593072 (+5 ms) 2024-12-02T14:09:53,073 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:09:53,077 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595955ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:09:53,078 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T14:09:53,079 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:09:53,079 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:09:53,079 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:09:53,080 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T14:09:53,080 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T14:09:53,080 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:09:53,083 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:09:53,084 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:09:53,085 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:09:53,085 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:09:53,086 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:09:53,087 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:09:53,087 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:09:53,088 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:09:53,089 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:09:53,090 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Unable to get 
data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T14:09:53,091 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:09:53,092 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:09:53,093 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:09:53,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:53,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:09:53,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,095 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a3a61c9ba14f,37979,1733148592890, sessionid=0x1009b4427530000, setting cluster-up flag (Was=false) 2024-12-02T14:09:53,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,096 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,099 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:09:53,100 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:53,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,107 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, 
/hbase/online-snapshot/abort 2024-12-02T14:09:53,107 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:53,109 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:09:53,111 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:53,111 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:09:53,111 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T14:09:53,111 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a3a61c9ba14f,37979,1733148592890 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a3a61c9ba14f:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:09:53,113 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,114 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733148623114 2024-12-02T14:09:53,114 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:09:53,114 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:09:53,114 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:09:53,114 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:09:53,114 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:09:53,114 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:09:53,115 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:53,115 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:09:53,115 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
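Several of the ZKUtil / RecoverableZooKeeper DEBUG pairs earlier in this startup sequence (for /hbase/balancer, /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge, /hbase/snapshot-cleanup) are the new active master probing optional switch znodes on a fresh cluster; "not necessarily an error" means a missing node simply leaves the default behaviour in place. A minimal sketch of the same check with the plain ZooKeeper client is below. The quorum address and znode path are copied from the log; the surrounding class is purely illustrative and is not HBase's internal ZKUtil code.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZNodeProbe {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log line "quorum=127.0.0.1:64260"; adjust for a real cluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64260", 30000, (WatchedEvent e) -> { });
    try {
      byte[] data = zk.getData("/hbase/balancer", false, null);
      System.out.println("balancer switch znode present, " + data.length + " bytes");
    } catch (KeeperException.NoNodeException e) {
      // The situation the master logs as "not necessarily an error": fall back to defaults.
      System.out.println("/hbase/balancer does not exist; balancer stays enabled by default");
    } finally {
      zk.close();
    }
  }
}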
2024-12-02T14:09:53,116 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,116 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:09:53,117 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:09:53,117 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:09:53,117 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:09:53,118 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:09:53,118 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:09:53,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148593118,5,FailOnTimeoutGroup] 2024-12-02T14:09:53,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148593121,5,FailOnTimeoutGroup] 2024-12-02T14:09:53,121 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,122 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:09:53,122 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,122 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:09:53,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:09:53,128 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:09:53,128 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b 2024-12-02T14:09:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:09:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:09:53,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:53,139 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:09:53,140 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:09:53,140 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:09:53,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:09:53,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:09:53,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:09:53,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:09:53,148 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:09:53,148 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:09:53,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740 2024-12-02T14:09:53,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740 2024-12-02T14:09:53,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:09:53,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:09:53,151 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:09:53,152 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:09:53,154 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:09:53,155 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762081, jitterRate=-0.03096407651901245}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:09:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733148593137Initializing all the Stores at 1733148593138 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148593138Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148593139 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148593139Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148593139Cleaning up temporary data from old regions at 1733148593151 (+12 ms)Region opened successfully at 1733148593155 (+4 ms) 2024-12-02T14:09:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:09:53,156 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:09:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:09:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:09:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:09:53,156 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:09:53,156 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148593156Disabling compacts and flushes for region at 
1733148593156Disabling writes for close at 1733148593156Writing region close event to WAL at 1733148593156Closed at 1733148593156 2024-12-02T14:09:53,158 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:53,158 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:09:53,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:09:53,158 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(746): ClusterId : f18dd18c-ca9c-4351-9d4a-8b544b06575f 2024-12-02T14:09:53,158 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:09:53,159 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:09:53,160 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:09:53,160 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:09:53,161 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:09:53,161 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:09:53,162 DEBUG [RS:0;a3a61c9ba14f:32833 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32cb990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:09:53,173 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a3a61c9ba14f:32833 2024-12-02T14:09:53,173 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:09:53,173 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:09:53,173 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(832): About to register with Master. 
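The AbstractRpcClient line above dumps the client-side RPC settings the region server is using (KeyValueCodec, connectTO=10000, readTO=20000, writeTO=60000, tcpNoDelay=true). A hedged sketch of tuning the same socket-level knobs on an ordinary client Connection follows; the property names are quoted from memory of the standard HBase keys whose defaults match these values, so treat them as assumptions and check them against the release under test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RpcClientTuning {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Socket-level timeouts; the defaults (10s/20s/60s) are what the log above reports.
    conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000);
    conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);
    conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);
    conf.setBoolean("hbase.ipc.client.tcpnodelay", true);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}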
2024-12-02T14:09:53,174 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,37979,1733148592890 with port=32833, startcode=1733148592935 2024-12-02T14:09:53,174 DEBUG [RS:0;a3a61c9ba14f:32833 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:09:53,176 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42517, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:09:53,176 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37979 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,177 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37979 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,179 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b 2024-12-02T14:09:53,179 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33497 2024-12-02T14:09:53,179 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:09:53,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:09:53,181 DEBUG [RS:0;a3a61c9ba14f:32833 {}] zookeeper.ZKUtil(111): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,181 WARN [RS:0;a3a61c9ba14f:32833 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:09:53,181 INFO [RS:0;a3a61c9ba14f:32833 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:09:53,181 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,181 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,32833,1733148592935] 2024-12-02T14:09:53,185 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:09:53,187 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:09:53,187 INFO [RS:0;a3a61c9ba14f:32833 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:09:53,187 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
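MemStoreFlusher reports a global memstore limit of 880 M with a low-water mark of 836 M (95% of it), and the PressureAwareCompactionThroughputController reports 100 MB/s and 50 MB/s throughput bounds. Both are configuration-driven; the sketch below sets them explicitly. The key names are assumptions based on the standard HBase properties whose defaults match these figures; verify them for the version being tested.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndCompactionThroughputTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of heap usable by all memstores, and the low-water mark as a fraction of that.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput throttle, in bytes per second (100 MB/s upper, 50 MB/s lower).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println("global memstore fraction = "
        + conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
  }
}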
2024-12-02T14:09:53,187 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:09:53,188 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:09:53,188 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,188 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:09:53,189 DEBUG [RS:0;a3a61c9ba14f:32833 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:09:53,191 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
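Most of the "Chore ScheduledChore name=... period=... is enabled." INFO lines in this section come from ChoreService.scheduleChore; each background task (CompactionChecker, MemstoreFlusherChore, LogsCleaner, HFileCleaner, and so on) is a ScheduledChore. A minimal, self-contained sketch of that pattern outside any server process, assuming the public ScheduledChore and ChoreService constructors:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreDemo {
  public static void main(String[] args) throws InterruptedException {
    // A Stoppable is normally the enclosing server; a trivial stand-in is enough here.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1, 0, TimeUnit.SECONDS) {
      @Override protected void chore() {
        System.out.println("DemoChore fired");
      }
    };

    ChoreService service = new ChoreService("demo");
    // Scheduling is what produces "Chore ScheduledChore name=DemoChore ... is enabled."
    service.scheduleChore(chore);
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}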
2024-12-02T14:09:53,192 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,192 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,192 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,192 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,192 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,32833,1733148592935-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:09:53,205 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:09:53,205 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,32833,1733148592935-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,205 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,206 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.Replication(171): a3a61c9ba14f,32833,1733148592935 started 2024-12-02T14:09:53,240 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,240 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,32833,1733148592935, RpcServer on a3a61c9ba14f/172.17.0.2:32833, sessionid=0x1009b4427530001 2024-12-02T14:09:53,240 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:09:53,240 DEBUG [RS:0;a3a61c9ba14f:32833 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,241 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,32833,1733148592935' 2024-12-02T14:09:53,241 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:09:53,241 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:09:53,242 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:09:53,242 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:09:53,242 DEBUG [RS:0;a3a61c9ba14f:32833 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,242 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,32833,1733148592935' 2024-12-02T14:09:53,242 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:09:53,242 DEBUG 
[RS:0;a3a61c9ba14f:32833 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:09:53,243 DEBUG [RS:0;a3a61c9ba14f:32833 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:09:53,243 INFO [RS:0;a3a61c9ba14f:32833 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:09:53,243 INFO [RS:0;a3a61c9ba14f:32833 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:09:53,311 WARN [a3a61c9ba14f:37979 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:09:53,345 INFO [RS:0;a3a61c9ba14f:32833 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C32833%2C1733148592935, suffix=, logDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935, archiveDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs, maxLogs=32 2024-12-02T14:09:53,346 INFO [RS:0;a3a61c9ba14f:32833 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 2024-12-02T14:09:53,354 INFO [RS:0;a3a61c9ba14f:32833 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 2024-12-02T14:09:53,357 DEBUG [RS:0;a3a61c9ba14f:32833 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34753:34753),(127.0.0.1/127.0.0.1:37909:37909)] 2024-12-02T14:09:53,561 DEBUG [a3a61c9ba14f:37979 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T14:09:53,563 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,567 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,32833,1733148592935, state=OPENING 2024-12-02T14:09:53,570 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:09:53,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:09:53,575 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:09:53,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:53,575 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:53,575 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,32833,1733148592935}] 2024-12-02T14:09:53,730 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:09:53,736 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46949, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:09:53,744 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:09:53,744 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:09:53,746 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C32833%2C1733148592935.meta, suffix=.meta, logDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935, archiveDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs, maxLogs=32 2024-12-02T14:09:53,747 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta 2024-12-02T14:09:53,751 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta 2024-12-02T14:09:53,752 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37909:37909),(127.0.0.1/127.0.0.1:34753:34753)] 2024-12-02T14:09:53,753 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:09:53,754 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:09:53,754 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T14:09:53,754 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
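The coprocessor lines above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor (the coprocessor$1 attribute printed earlier in the descriptor dumps). User tables can attach a coprocessor the same way through the descriptor builders; a hedged sketch using the public client API, with a made-up table name:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CoprocessorTableDescriptor {
  public static void main(String[] args) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table"))   // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        // Same endpoint class the meta descriptor carries as coprocessor$1.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td);
  }
}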
2024-12-02T14:09:53,754 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:09:53,754 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:53,754 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:09:53,754 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:09:53,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:09:53,757 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:09:53,757 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,757 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:09:53,759 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:09:53,759 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,759 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:09:53,760 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:09:53,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:09:53,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:09:53,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:09:53,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
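Each CompactionConfiguration(183) dump above is the per-store view of the compaction tuning knobs: 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, a 2.5 GB throttle point, and weekly major compactions with 0.5 jitter. The sketch below sets the corresponding site-level properties; the key names are the standard ones whose defaults match the values printed here, but treat them as assumptions and confirm against the deployed version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);               // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);              // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);        // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period, 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}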
2024-12-02T14:09:53,763 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:09:53,764 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740 2024-12-02T14:09:53,766 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740 2024-12-02T14:09:53,768 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:09:53,768 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:09:53,769 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:09:53,770 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:09:53,771 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803325, jitterRate=0.021481484174728394}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:09:53,771 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:09:53,772 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733148593754Writing region info on filesystem at 1733148593754Initializing all the Stores at 1733148593755 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148593755Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148593756 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148593756Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148593756Cleaning up temporary data from old regions at 1733148593768 (+12 ms)Running coprocessor post-open hooks at 1733148593771 (+3 ms)Region opened successfully at 1733148593772 (+1 ms) 2024-12-02T14:09:53,773 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733148593730 2024-12-02T14:09:53,776 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:09:53,776 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:09:53,777 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,778 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,32833,1733148592935, state=OPEN 2024-12-02T14:09:53,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:09:53,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:09:53,780 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:53,780 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:53,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:09:53,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:09:53,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,32833,1733148592935 in 205 msec 2024-12-02T14:09:53,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T14:09:53,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 625 msec 2024-12-02T14:09:53,787 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:09:53,787 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:09:53,789 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:09:53,789 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,32833,1733148592935, seqNum=-1] 2024-12-02T14:09:53,789 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:09:53,790 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51593, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:09:53,796 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 685 msec 2024-12-02T14:09:53,796 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733148593796, completionTime=-1 2024-12-02T14:09:53,797 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T14:09:53,797 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:09:53,799 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T14:09:53,799 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733148653799 2024-12-02T14:09:53,799 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733148713799 2024-12-02T14:09:53,799 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T14:09:53,799 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,37979,1733148592890-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,799 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,37979,1733148592890-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,800 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,37979,1733148592890-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,800 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a3a61c9ba14f:37979, period=300000, unit=MILLISECONDS is enabled. 
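The ConnectionUtils entries above show a client fetching the hbase:meta region location from the connection registry before the namespaces are created. A hedged sketch of the equivalent lookup through the public client API (RegionLocator), rather than the internal registry calls that actually produced these lines:

    // Hedged sketch: ask where the hbase:meta region is hosted via the public
    // RegionLocator API. Illustrative only; the log lines come from internal
    // ConnectionUtils/registry code, not from this client-side path.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationSketch {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
                // Returns the server currently hosting the meta region, comparable to
                // "hostname=a3a61c9ba14f,32833,1733148592935" in the log.
                HRegionLocation location = locator.getRegionLocation(Bytes.toBytes(""));
                System.out.println(location.getServerName());
            }
        }
    }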
2024-12-02T14:09:53,800 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,800 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,802 DEBUG [master/a3a61c9ba14f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.832sec 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,37979,1733148592890-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:09:53,805 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,37979,1733148592890-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T14:09:53,808 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T14:09:53,808 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T14:09:53,808 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,37979,1733148592890-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
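The ChoreService entries above all follow the same fixed-period pattern (name, period, unit). A minimal sketch of that pattern using the JDK scheduler, assuming nothing about ChoreService internals (HBase's ChoreService wraps a ScheduledThreadPoolExecutor, but this is not its implementation):

    // Minimal sketch of the fixed-period scheduling pattern behind the chores above
    // (BalancerChore, CatalogJanitor, HbckChore, ...), using the plain JDK scheduler.
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
        public static void main(String[] args) {
            ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
            // Comparable to "period=300000, unit=MILLISECONDS" in the log: run the task
            // every 300 seconds after an initial delay of one period.
            Runnable catalogJanitorLikeTask = () -> System.out.println("scanning hbase:meta for garbage");
            scheduler.scheduleAtFixedRate(catalogJanitorLikeTask, 300_000, 300_000, TimeUnit.MILLISECONDS);
        }
    }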
2024-12-02T14:09:53,859 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6076216, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:09:53,859 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a3a61c9ba14f,37979,-1 for getting cluster id 2024-12-02T14:09:53,859 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:09:53,861 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f18dd18c-ca9c-4351-9d4a-8b544b06575f' 2024-12-02T14:09:53,861 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:09:53,861 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f18dd18c-ca9c-4351-9d4a-8b544b06575f" 2024-12-02T14:09:53,862 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f2f8d94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:09:53,862 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a3a61c9ba14f,37979,-1] 2024-12-02T14:09:53,862 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:09:53,862 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:09:53,864 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60270, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:09:53,865 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73b17c7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:09:53,865 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:09:53,866 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,32833,1733148592935, seqNum=-1] 2024-12-02T14:09:53,866 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:09:53,868 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43418, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:09:53,870 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:53,871 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:53,873 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T14:09:53,888 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:09:53,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:53,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:53,888 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:09:53,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:09:53,888 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:09:53,888 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:09:53,888 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:09:53,889 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36269 2024-12-02T14:09:53,890 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36269 connecting to ZooKeeper ensemble=127.0.0.1:64260 2024-12-02T14:09:53,891 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:53,892 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:09:53,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:362690x0, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:09:53,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:362690x0, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-02T14:09:53,896 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-02T14:09:53,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36269-0x1009b4427530002 connected 2024-12-02T14:09:53,897 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:09:53,897 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:09:53,898 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:09:53,900 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:09:53,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36269 2024-12-02T14:09:53,901 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36269 2024-12-02T14:09:53,905 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36269 2024-12-02T14:09:53,906 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36269 2024-12-02T14:09:53,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36269 2024-12-02T14:09:53,909 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(746): ClusterId : f18dd18c-ca9c-4351-9d4a-8b544b06575f 2024-12-02T14:09:53,909 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:09:53,911 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:09:53,911 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:09:53,913 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:09:53,914 DEBUG [RS:1;a3a61c9ba14f:36269 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e89aee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:09:53,924 DEBUG [RS:1;a3a61c9ba14f:36269 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a3a61c9ba14f:36269 2024-12-02T14:09:53,924 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:09:53,924 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:09:53,924 DEBUG [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(832): About to register with Master. 
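The RpcExecutor lines above ("Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36269") describe bounded call queues drained by a fixed set of handler threads. A standalone approximation of that producer/consumer layout, not HBase's RpcExecutor code:

    // Sketch of the call-queue pattern the RpcExecutor lines describe: a bounded
    // LinkedBlockingQueue (maxQueueLength=30 above) drained by a fixed number of
    // handler threads. Names mirror the log's thread prefix for readability only.
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class CallQueueSketch {
        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30); // maxQueueLength
            int handlerCount = 3;
            for (int i = 0; i < handlerCount; i++) {
                Thread handler = new Thread(() -> {
                    try {
                        while (true) {
                            callQueue.take().run(); // block until an RPC call is queued
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }, "default.FPBQ.Fifo.handler=" + i);
                handler.setDaemon(true);
                handler.start();
            }
            callQueue.offer(() -> System.out.println("handling a queued call"));
            Thread.sleep(100); // give the daemon handlers a moment before main exits
        }
    }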
2024-12-02T14:09:53,925 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,37979,1733148592890 with port=36269, startcode=1733148593887 2024-12-02T14:09:53,925 DEBUG [RS:1;a3a61c9ba14f:36269 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:09:53,927 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59897, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:09:53,928 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37979 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,36269,1733148593887 2024-12-02T14:09:53,928 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37979 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,36269,1733148593887 2024-12-02T14:09:53,930 DEBUG [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b 2024-12-02T14:09:53,930 DEBUG [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33497 2024-12-02T14:09:53,930 DEBUG [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:09:53,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:09:53,932 DEBUG [RS:1;a3a61c9ba14f:36269 {}] zookeeper.ZKUtil(111): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,36269,1733148593887 2024-12-02T14:09:53,932 WARN [RS:1;a3a61c9ba14f:36269 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:09:53,932 INFO [RS:1;a3a61c9ba14f:36269 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:09:53,932 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,36269,1733148593887] 2024-12-02T14:09:53,932 DEBUG [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887 2024-12-02T14:09:53,935 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:09:53,937 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:09:53,937 INFO [RS:1;a3a61c9ba14f:36269 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:09:53,937 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
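The PressureAwareCompactionThroughputController line above configures a 50-100 MB/s band with unlimited off-peak throughput. A simplified sketch of the interpolation idea behind such a pressure-aware bound; the real tuner also smooths and re-tunes periodically, so treat this as an approximation of the concept only:

    // Rough sketch of the bounds in the log (lower 50 MB/s, higher 100 MB/s): allowed
    // compaction throughput is interpolated between the bounds from a 0..1 "pressure"
    // value, and becomes unlimited off-peak.
    public class ThroughputTuningSketch {
        static final double LOWER_MB_PER_SEC = 50.0;
        static final double HIGHER_MB_PER_SEC = 100.0;

        static double allowedThroughput(double pressure, boolean offPeak) {
            if (offPeak) {
                return Double.MAX_VALUE; // "off peak: unlimited"
            }
            double clamped = Math.max(0.0, Math.min(1.0, pressure));
            return LOWER_MB_PER_SEC + (HIGHER_MB_PER_SEC - LOWER_MB_PER_SEC) * clamped;
        }

        public static void main(String[] args) {
            System.out.println(allowedThroughput(0.0, false)); // 50.0
            System.out.println(allowedThroughput(0.5, false)); // 75.0
        }
    }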
2024-12-02T14:09:53,937 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:09:53,938 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:09:53,938 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,938 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,938 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,938 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:09:53,939 DEBUG [RS:1;a3a61c9ba14f:36269 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:09:53,939 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T14:09:53,939 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,939 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,940 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,940 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,940 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36269,1733148593887-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:09:53,953 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:09:53,953 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36269,1733148593887-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,953 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,953 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.Replication(171): a3a61c9ba14f,36269,1733148593887 started 2024-12-02T14:09:53,964 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:09:53,964 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,36269,1733148593887, RpcServer on a3a61c9ba14f/172.17.0.2:36269, sessionid=0x1009b4427530002 2024-12-02T14:09:53,964 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:09:53,964 DEBUG [RS:1;a3a61c9ba14f:36269 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,36269,1733148593887 2024-12-02T14:09:53,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;a3a61c9ba14f:36269,5,FailOnTimeoutGroup] 2024-12-02T14:09:53,964 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,36269,1733148593887' 2024-12-02T14:09:53,964 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:09:53,964 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-02T14:09:53,965 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:09:53,965 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T14:09:53,965 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:09:53,965 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:09:53,965 DEBUG [RS:1;a3a61c9ba14f:36269 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
a3a61c9ba14f,36269,1733148593887 2024-12-02T14:09:53,965 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,36269,1733148593887' 2024-12-02T14:09:53,965 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:09:53,966 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:09:53,966 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is a3a61c9ba14f,37979,1733148592890 2024-12-02T14:09:53,966 DEBUG [RS:1;a3a61c9ba14f:36269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:09:53,966 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@29e99e4f 2024-12-02T14:09:53,966 INFO [RS:1;a3a61c9ba14f:36269 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:09:53,966 INFO [RS:1;a3a61c9ba14f:36269 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:09:53,966 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T14:09:53,968 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60272, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T14:09:53,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T14:09:53,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
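The two TableDescriptorChecker warnings above are triggered by deliberately tiny per-test sizes (MAX_FILESIZE 786432, MEMSTORE_FLUSHSIZE 8192) on the table created next. A hedged reconstruction of the client-side request using the standard HBase Admin API, based on the logged descriptor rather than the actual test source:

    // Hedged sketch of the create-table request that produces the entries below:
    // a 'TestLogRolling-testLogRollOnDatanodeDeath' table with a single 'info'
    // family and the small per-test sizes that trigger both warnings above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    .setMaxFileSize(786432L)        // triggers the MAX_FILESIZE warning
                    .setMemStoreFlushSize(8192L)    // triggers the MEMSTORE_FLUSHSIZE warning
                    .build();
                admin.createTable(desc);            // stored as pid=4 CreateTableProcedure
            }
        }
    }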
2024-12-02T14:09:53,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:09:53,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T14:09:53,972 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T14:09:53,972 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:53,972 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-02T14:09:53,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:09:53,973 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T14:09:53,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741835_1011 (size=393) 2024-12-02T14:09:53,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741835_1011 (size=393) 2024-12-02T14:09:53,983 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6a07bf265a12f1bc622a306390bdc470, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b 2024-12-02T14:09:53,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37515 is added to blk_1073741836_1012 (size=76) 2024-12-02T14:09:53,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44871 is added to blk_1073741836_1012 (size=76) 2024-12-02T14:09:53,990 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:53,990 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 6a07bf265a12f1bc622a306390bdc470, disabling compactions & flushes 2024-12-02T14:09:53,990 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:09:53,990 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:09:53,990 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. after waiting 0 ms 2024-12-02T14:09:53,990 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:09:53,990 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:09:53,990 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6a07bf265a12f1bc622a306390bdc470: Waiting for close lock at 1733148593990Disabling compacts and flushes for region at 1733148593990Disabling writes for close at 1733148593990Writing region close event to WAL at 1733148593990Closed at 1733148593990 2024-12-02T14:09:53,992 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T14:09:53,992 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733148593992"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733148593992"}]},"ts":"1733148593992"} 2024-12-02T14:09:53,995 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T14:09:53,996 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T14:09:53,996 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148593996"}]},"ts":"1733148593996"} 2024-12-02T14:09:53,999 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-02T14:09:53,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6a07bf265a12f1bc622a306390bdc470, ASSIGN}] 2024-12-02T14:09:54,001 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6a07bf265a12f1bc622a306390bdc470, ASSIGN 2024-12-02T14:09:54,002 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6a07bf265a12f1bc622a306390bdc470, ASSIGN; state=OFFLINE, location=a3a61c9ba14f,32833,1733148592935; forceNewPlan=false, retain=false 2024-12-02T14:09:54,071 INFO [RS:1;a3a61c9ba14f:36269 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C36269%2C1733148593887, suffix=, logDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887, archiveDir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs, maxLogs=32 2024-12-02T14:09:54,074 INFO [RS:1;a3a61c9ba14f:36269 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 2024-12-02T14:09:54,084 INFO [RS:1;a3a61c9ba14f:36269 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 2024-12-02T14:09:54,085 DEBUG [RS:1;a3a61c9ba14f:36269 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37909:37909),(127.0.0.1/127.0.0.1:34753:34753)] 2024-12-02T14:09:54,153 INFO [a3a61c9ba14f:37979 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
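The WAL configuration line above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) implies the roll and archive thresholds sketched below. This is a simplified illustration of the trigger conditions, not FSHLog internals; the 0.5 multiplier is the documented default for hbase.regionserver.logroll.multiplier and is assumed here to explain the 128 MB rollsize.

    // Simplified sketch of the log-roll trigger implied by the WAL configuration line:
    // a new WAL file is requested once the current one grows past rollsize, and old
    // files become candidates for archiving when more than maxLogs are outstanding.
    public class WalRollSketch {
        static final long BLOCK_SIZE = 256L * 1024 * 1024;
        static final double ROLL_MULTIPLIER = 0.5; // default hbase.regionserver.logroll.multiplier
        static final long ROLL_SIZE = (long) (BLOCK_SIZE * ROLL_MULTIPLIER); // 128 MB, as logged
        static final int MAX_LOGS = 32;

        static boolean shouldRoll(long currentWalBytes) {
            return currentWalBytes >= ROLL_SIZE;
        }

        static boolean shouldArchive(int outstandingWalFiles) {
            return outstandingWalFiles > MAX_LOGS;
        }

        public static void main(String[] args) {
            System.out.println(shouldRoll(130L * 1024 * 1024)); // true: past 128 MB
            System.out.println(shouldArchive(33));               // true: over maxLogs=32
        }
    }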
2024-12-02T14:09:54,154 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6a07bf265a12f1bc622a306390bdc470, regionState=OPENING, regionLocation=a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:54,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6a07bf265a12f1bc622a306390bdc470, ASSIGN because future has completed 2024-12-02T14:09:54,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6a07bf265a12f1bc622a306390bdc470, server=a3a61c9ba14f,32833,1733148592935}] 2024-12-02T14:09:54,330 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:09:54,330 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 6a07bf265a12f1bc622a306390bdc470, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:09:54,331 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,331 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:09:54,331 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,331 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,333 INFO [StoreOpener-6a07bf265a12f1bc622a306390bdc470-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,335 INFO [StoreOpener-6a07bf265a12f1bc622a306390bdc470-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a07bf265a12f1bc622a306390bdc470 columnFamilyName info 2024-12-02T14:09:54,335 DEBUG [StoreOpener-6a07bf265a12f1bc622a306390bdc470-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:09:54,336 INFO [StoreOpener-6a07bf265a12f1bc622a306390bdc470-1 {}] regionserver.HStore(327): Store=6a07bf265a12f1bc622a306390bdc470/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:09:54,336 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,337 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,337 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,337 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,337 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,339 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,342 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:09:54,343 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 6a07bf265a12f1bc622a306390bdc470; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844594, jitterRate=0.07395684719085693}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:09:54,343 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:09:54,344 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 6a07bf265a12f1bc622a306390bdc470: Running coprocessor pre-open hook at 1733148594331Writing region info on filesystem at 1733148594331Initializing all the Stores at 1733148594332 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148594332Cleaning up temporary data from old regions at 1733148594337 (+5 ms)Running coprocessor post-open hooks at 1733148594343 (+6 ms)Region opened successfully at 1733148594344 (+1 ms) 2024-12-02T14:09:54,345 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470., pid=6, masterSystemTime=1733148594319 2024-12-02T14:09:54,348 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:09:54,348 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:09:54,349 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=6a07bf265a12f1bc622a306390bdc470, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,32833,1733148592935 2024-12-02T14:09:54,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6a07bf265a12f1bc622a306390bdc470, server=a3a61c9ba14f,32833,1733148592935 because future has completed 2024-12-02T14:09:54,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T14:09:54,355 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 6a07bf265a12f1bc622a306390bdc470, server=a3a61c9ba14f,32833,1733148592935 in 191 msec 2024-12-02T14:09:54,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T14:09:54,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=6a07bf265a12f1bc622a306390bdc470, ASSIGN in 356 msec 2024-12-02T14:09:54,358 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T14:09:54,358 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148594358"}]},"ts":"1733148594358"} 2024-12-02T14:09:54,361 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-02T14:09:54,362 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T14:09:54,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 393 msec 2024-12-02T14:09:54,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:09:54,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:09:55,141 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:09:55,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:09:55,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:09:55,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:09:55,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:09:55,175 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T14:09:55,175 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-02T14:09:55,176 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-02T14:09:59,185 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T14:09:59,187 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-02T14:10:00,681 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:10:00,685 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:00,704 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:00,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:00,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:04,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37979 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:10:04,071 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-02T14:10:04,071 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-02T14:10:04,078 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T14:10:04,078 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:10:04,094 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:04,096 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:04,097 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:04,097 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:04,097 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:10:04,098 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22d0350b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:04,098 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65266587{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:04,186 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44952386{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir/jetty-localhost-36331-hadoop-hdfs-3_4_1-tests_jar-_-any-2954334209795198985/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:04,186 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@57ab428e{HTTP/1.1, (http/1.1)}{localhost:36331} 2024-12-02T14:10:04,187 INFO [Time-limited test {}] server.Server(415): Started @113849ms 2024-12-02T14:10:04,188 WARN 
[Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:10:04,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:04,217 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:04,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:04,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:04,218 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:10:04,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3df1987c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:04,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@458e5bcf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:04,245 WARN [Thread-832 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:04,245 WARN [Thread-831 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:04,258 WARN [Thread-811 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:10:04,260 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bb1cdff81732bef with lease ID 0x8f6b40ac3caacf07: Processing first storage report for DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8 from datanode DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:10:04,260 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bb1cdff81732bef with lease ID 0x8f6b40ac3caacf07: from storage DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8 node DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:04,260 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3bb1cdff81732bef with lease ID 0x8f6b40ac3caacf07: Processing first storage report for DS-7ed66382-4c49-4e72-a92f-fab00d353aa3 from datanode DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:10:04,260 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3bb1cdff81732bef with lease ID 0x8f6b40ac3caacf07: from storage DS-7ed66382-4c49-4e72-a92f-fab00d353aa3 node DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:04,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30008f24{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir/jetty-localhost-37099-hadoop-hdfs-3_4_1-tests_jar-_-any-18267833052823745/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:04,314 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54c06144{HTTP/1.1, (http/1.1)}{localhost:37099} 2024-12-02T14:10:04,314 INFO [Time-limited test {}] server.Server(415): Started @113976ms 2024-12-02T14:10:04,315 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:10:04,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:04,346 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:04,349 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:04,349 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:04,349 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:10:04,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f0760d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:04,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47d01054{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:04,372 WARN [Thread-866 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data7/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:04,372 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data8/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:04,389 WARN [Thread-846 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:10:04,392 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8cb92462026cdd83 with lease ID 0x8f6b40ac3caacf08: Processing first storage report for DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867 from datanode DatanodeRegistration(127.0.0.1:34991, datanodeUuid=a8c2c0f6-b427-49c3-bf61-4267a1027aa2, infoPort=34577, infoSecurePort=0, ipcPort=40885, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:10:04,392 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cb92462026cdd83 with lease ID 0x8f6b40ac3caacf08: from storage DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867 node DatanodeRegistration(127.0.0.1:34991, datanodeUuid=a8c2c0f6-b427-49c3-bf61-4267a1027aa2, infoPort=34577, infoSecurePort=0, ipcPort=40885, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:04,392 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8cb92462026cdd83 with lease ID 0x8f6b40ac3caacf08: Processing first storage report for DS-9ae6cc25-76be-4957-8e40-bd606a865c6d from datanode DatanodeRegistration(127.0.0.1:34991, datanodeUuid=a8c2c0f6-b427-49c3-bf61-4267a1027aa2, infoPort=34577, infoSecurePort=0, ipcPort=40885, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:10:04,392 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8cb92462026cdd83 with lease ID 0x8f6b40ac3caacf08: from storage DS-9ae6cc25-76be-4957-8e40-bd606a865c6d node DatanodeRegistration(127.0.0.1:34991, datanodeUuid=a8c2c0f6-b427-49c3-bf61-4267a1027aa2, infoPort=34577, infoSecurePort=0, ipcPort=40885, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:04,444 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f85c2b2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir/jetty-localhost-34129-hadoop-hdfs-3_4_1-tests_jar-_-any-6537047196219076939/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:04,444 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c4cb941{HTTP/1.1, (http/1.1)}{localhost:34129} 2024-12-02T14:10:04,444 INFO [Time-limited test {}] server.Server(415): Started @114107ms 2024-12-02T14:10:04,445 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-02T14:10:04,500 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data9/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:04,500 WARN [Thread-893 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data10/current/BP-272717943-172.17.0.2-1733148592299/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:04,516 WARN [Thread-881 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:10:04,518 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c9678ac169d769e with lease ID 0x8f6b40ac3caacf09: Processing first storage report for DS-695eb987-1cce-4381-8f57-7efc92958691 from datanode DatanodeRegistration(127.0.0.1:42087, datanodeUuid=a29bd2e9-6dac-4b9a-925d-94ad71437c59, infoPort=43649, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:10:04,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c9678ac169d769e with lease ID 0x8f6b40ac3caacf09: from storage DS-695eb987-1cce-4381-8f57-7efc92958691 node DatanodeRegistration(127.0.0.1:42087, datanodeUuid=a29bd2e9-6dac-4b9a-925d-94ad71437c59, infoPort=43649, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:04,519 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c9678ac169d769e with lease ID 0x8f6b40ac3caacf09: Processing first storage report for DS-729a8445-c7bb-4d90-be89-48c0df721e76 from datanode DatanodeRegistration(127.0.0.1:42087, datanodeUuid=a29bd2e9-6dac-4b9a-925d-94ad71437c59, infoPort=43649, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299) 2024-12-02T14:10:04,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c9678ac169d769e with lease ID 0x8f6b40ac3caacf09: from storage DS-729a8445-c7bb-4d90-be89-48c0df721e76 node DatanodeRegistration(127.0.0.1:42087, datanodeUuid=a29bd2e9-6dac-4b9a-925d-94ad71437c59, infoPort=43649, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:04,561 WARN [ResponseProcessor for block BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:04,561 WARN [ResponseProcessor for block BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,562 WARN [ResponseProcessor for block BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,562 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 block BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK], DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:04,562 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 block BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:04,562 WARN [ResponseProcessor for block BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,562 WARN [PacketResponder: BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44871] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,562 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 block BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:04,562 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta block BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:04,562 WARN [PacketResponder: BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44871] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:34540 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34540 dst: /127.0.0.1:37515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1058729929_22 at /127.0.0.1:34562 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34562 dst: /127.0.0.1:37515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:39348 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:44871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39348 dst: /127.0.0.1:44871 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:04,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1058729929_22 at /127.0.0.1:39390 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:44871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39390 dst: /127.0.0.1:44871 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,563 WARN [PacketResponder: BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:44871] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:39352 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:44871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39352 dst: /127.0.0.1:44871 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:04,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:34534 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34534 dst: /127.0.0.1:37515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,564 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bb5d847{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:04,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1576990072_22 at /127.0.0.1:39316 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:44871:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39316 dst: /127.0.0.1:44871 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,565 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e5e2c7f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:04,565 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1576990072_22 at /127.0.0.1:34512 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34512 dst: /127.0.0.1:37515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:04,565 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:04,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78ab676e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:04,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55c8142a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:04,566 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:04,566 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T14:10:04,567 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:04,567 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272717943-172.17.0.2-1733148592299 (Datanode Uuid 67b633d0-c428-40af-9668-81c34e41e2e9) service to localhost/127.0.0.1:33497 2024-12-02T14:10:04,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data3/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:04,568 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:04,568 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 block BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Sender.send(Sender.java:84) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Sender.writeBlock(Sender.java:171) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1922) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data4/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:04,569 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta block BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,569 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:58802 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58802 dst: /127.0.0.1:37515 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,569 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@6c6ef070 {}] datanode.DataXceiver(331): 127.0.0.1:37515:DataXceiver error processing unknown operation src: /127.0.0.1:58800 dst: /127.0.0.1:37515 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,570 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 block BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,570 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1576990072_22 at /127.0.0.1:58804 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58804 dst: /127.0.0.1:37515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:04,573 WARN [ResponseProcessor for block BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-272717943-172.17.0.2-1733148592299:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72b840c3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:04,574 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:04,574 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:04,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:04,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:04,575 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:04,575 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:10:04,575 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272717943-172.17.0.2-1733148592299 (Datanode Uuid 9d35c68d-d059-4d65-a2cc-1e4d8b09039a) service to localhost/127.0.0.1:33497 2024-12-02T14:10:04,575 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:04,576 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data1/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:04,576 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data2/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:04,576 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:04,580 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470., hostname=a3a61c9ba14f,32833,1733148592935, seqNum=2] 2024-12-02T14:10:04,581 ERROR [FSHLog-0-hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b-prefix:a3a61c9ba14f,32833,1733148592935 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,581 WARN [FSHLog-0-hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b-prefix:a3a61c9ba14f,32833,1733148592935 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,582 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,582 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C32833%2C1733148592935:(num 1733148593346) roll requested 2024-12-02T14:10:04,582 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 2024-12-02T14:10:04,588 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:04,588 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:04,588 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:04,588 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:04,588 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:04,588 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 2024-12-02T14:10:04,589 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,589 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:04,590 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-02T14:10:04,590 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-02T14:10:04,590 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 2024-12-02T14:10:04,592 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34577:34577),(127.0.0.1/127.0.0.1:43649:43649)] 2024-12-02T14:10:04,592 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 is not closed yet, will try archiving it next time 2024-12-02T14:10:04,593 WARN [IPC Server handler 3 on default port 33497 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-02T14:10:04,597 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 after 5ms 2024-12-02T14:10:05,018 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:05,940 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:06,592 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:06,594 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 2024-12-02T14:10:06,595 WARN [ResponseProcessor for block BP-272717943-172.17.0.2-1733148592299:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-272717943-172.17.0.2-1733148592299:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:06,596 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 block BP-272717943-172.17.0.2-1733148592299:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:06,598 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:54492 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:42087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54492 dst: /127.0.0.1:42087 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:06,597 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:38086 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34991:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38086 dst: /127.0.0.1:34991 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:06,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30008f24{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:06,602 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54c06144{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:06,602 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:06,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@458e5bcf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:06,602 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3df1987c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:06,604 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:06,604 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T14:10:06,604 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:06,604 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272717943-172.17.0.2-1733148592299 (Datanode Uuid a8c2c0f6-b427-49c3-bf61-4267a1027aa2) service to localhost/127.0.0.1:33497 2024-12-02T14:10:06,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data7/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:06,605 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data8/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:06,605 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:07,019 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:07,941 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:08,594 WARN [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]] 2024-12-02T14:10:08,594 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:08,594 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C32833%2C1733148592935:(num 1733148604582) roll requested 2024-12-02T14:10:08,595 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.1733148608595 2024-12-02T14:10:08,599 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 after 4009ms 2024-12-02T14:10:08,602 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:08,603 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:08,603 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741839_1021 2024-12-02T14:10:08,607 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:08,610 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T14:10:08,613 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:08,613 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK], DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 
2024-12-02T14:10:08,613 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741840_1022 2024-12-02T14:10:08,614 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:08,617 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:08,617 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:08,617 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:08,617 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:08,618 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:08,618 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148608595 2024-12-02T14:10:08,619 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39631:39631),(127.0.0.1/127.0.0.1:43649:43649)] 2024-12-02T14:10:08,619 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 is not closed yet, will try archiving it next time 2024-12-02T14:10:08,619 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 is not closed yet, will try archiving it next time 2024-12-02T14:10:08,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42087 is added to blk_1073741838_1020 (size=3600) 2024-12-02T14:10:09,019 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:09,021 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 is not closed yet, will try archiving it next time 2024-12-02T14:10:09,942 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,534 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@23c1cc2d[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42087, datanodeUuid=a29bd2e9-6dac-4b9a-925d-94ad71437c59, infoPort=43649, infoSecurePort=0, ipcPort=43565, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741838_1020 to 127.0.0.1:37515 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:10,614 WARN [ResponseProcessor for block BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,615 WARN [DataStreamer for file /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148608595 block BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:10,615 WARN [PacketResponder: BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:42087] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:10,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:35446 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35446 dst: /127.0.0.1:33603 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:10,615 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:54508 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:42087:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54508 dst: /127.0.0.1:42087 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:10,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f85c2b2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:10,616 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c4cb941{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:10,616 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:10,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47d01054{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:10,616 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f0760d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:10,617 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:10,617 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T14:10:10,617 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272717943-172.17.0.2-1733148592299 (Datanode Uuid a29bd2e9-6dac-4b9a-925d-94ad71437c59) service to localhost/127.0.0.1:33497 2024-12-02T14:10:10,617 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:10,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data9/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:10,618 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data10/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:10,618 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:10,619 WARN [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]] 2024-12-02T14:10:10,619 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,619 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C32833%2C1733148592935:(num 1733148608595) roll requested 2024-12-02T14:10:10,620 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.1733148610619 2024-12-02T14:10:10,622 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,623 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK], DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:10,623 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741842_1025 2024-12-02T14:10:10,623 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] 2024-12-02T14:10:10,625 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,625 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:10,625 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741843_1026 2024-12-02T14:10:10,626 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:10,627 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,628 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:10,628 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741844_1027 2024-12-02T14:10:10,628 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:10,630 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,630 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:10,630 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741845_1028 2024-12-02T14:10:10,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] regionserver.HRegion(8855): Flush requested on 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:10:10,630 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6a07bf265a12f1bc622a306390bdc470 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T14:10:10,631 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:10,632 WARN [IPC Server handler 4 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:10:10,632 WARN [IPC Server handler 4 on default port 33497 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:10:10,632 WARN [IPC Server handler 4 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:10:10,635 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:10,635 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:10,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:10,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:10,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:10,636 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148608595 with entries=7, 
filesize=7.25 KB; new WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148610619 2024-12-02T14:10:10,637 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39631:39631)] 2024-12-02T14:10:10,637 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 is not closed yet, will try archiving it next time 2024-12-02T14:10:10,637 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148608595 is not closed yet, will try archiving it next time 2024-12-02T14:10:10,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741841_1024 (size=7430) 2024-12-02T14:10:10,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/2c50f4ca87424f6f975788ee5b5ee43a is 1080, key is row0002/info:/1733148606606/Put/seqid=0 2024-12-02T14:10:10,652 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37515 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43798 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741847_1030 to mirror 127.0.0.1:37515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:10,652 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:10,652 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43798 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:10,652 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741847_1030 2024-12-02T14:10:10,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43798 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43798 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:10,653 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:10,654 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,654 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:10,654 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741848_1031 2024-12-02T14:10:10,654 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:10,656 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:44871 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,656 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43810 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741849_1032 to mirror 127.0.0.1:44871 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:10,657 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:10,657 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741849_1032 2024-12-02T14:10:10,657 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43810 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:10,657 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43810 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43810 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:10,657 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] 2024-12-02T14:10:10,659 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42087 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:10,659 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43814 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741850_1033 to mirror 127.0.0.1:42087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:10,660 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 
2024-12-02T14:10:10,660 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43814 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:10,660 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741850_1033 2024-12-02T14:10:10,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43814 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43814 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:10,660 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:10,661 WARN [IPC Server handler 0 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:10:10,661 WARN [IPC Server handler 0 on default port 33497 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:10:10,661 WARN [IPC Server handler 0 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:10:10,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741851_1034 (size=10347) 2024-12-02T14:10:11,020 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:11,040 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 is not closed yet, will try archiving it next time 2024-12-02T14:10:11,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/2c50f4ca87424f6f975788ee5b5ee43a 2024-12-02T14:10:11,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/2c50f4ca87424f6f975788ee5b5ee43a as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/2c50f4ca87424f6f975788ee5b5ee43a 2024-12-02T14:10:11,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/2c50f4ca87424f6f975788ee5b5ee43a, entries=5, sequenceid=11, filesize=10.1 K 2024-12-02T14:10:11,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 6a07bf265a12f1bc622a306390bdc470 in 456ms, sequenceid=11, compaction requested=false 2024-12-02T14:10:11,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6a07bf265a12f1bc622a306390bdc470: 2024-12-02T14:10:11,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] regionserver.HRegion(8855): Flush requested on 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:10:11,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6a07bf265a12f1bc622a306390bdc470 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-02T14:10:11,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/99a6409b667c40c08bf043f29bc1670a is 1080, key is row0007/info:/1733148610632/Put/seqid=0 2024-12-02T14:10:11,267 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:11,267 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:11,267 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741852_1035 2024-12-02T14:10:11,267 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] 2024-12-02T14:10:11,268 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:11,269 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:11,269 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741853_1036 2024-12-02T14:10:11,269 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:11,270 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:11,270 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:11,270 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741854_1037 2024-12-02T14:10:11,271 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:11,273 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34991 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:11,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43838 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741855_1038 to mirror 127.0.0.1:34991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:11,273 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:11,273 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741855_1038 2024-12-02T14:10:11,273 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43838 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:11,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43838 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43838 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:11,274 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:11,274 WARN [IPC Server handler 4 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:10:11,274 WARN [IPC Server handler 4 on default port 33497 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:10:11,274 WARN [IPC Server handler 4 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:10:11,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741856_1039 (size=12506) 2024-12-02T14:10:11,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/99a6409b667c40c08bf043f29bc1670a 2024-12-02T14:10:11,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/99a6409b667c40c08bf043f29bc1670a as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a 2024-12-02T14:10:11,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a, entries=7, sequenceid=24, filesize=12.2 K 2024-12-02T14:10:11,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 6a07bf265a12f1bc622a306390bdc470 in 437ms, sequenceid=24, compaction requested=false 2024-12-02T14:10:11,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6a07bf265a12f1bc622a306390bdc470: 2024-12-02T14:10:11,699 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-02T14:10:11,699 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:11,699 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a because midkey is the same as first or last row 2024-12-02T14:10:11,942 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:12,637 WARN [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]] 2024-12-02T14:10:12,637 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:12,638 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C32833%2C1733148592935:(num 1733148610619) roll requested 2024-12-02T14:10:12,639 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.1733148612638 2024-12-02T14:10:12,646 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43864 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741857_1040 to mirror 127.0.0.1:37515 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:12,646 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37515 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:12,646 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43864 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-12-02T14:10:12,647 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:12,647 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741857_1040 2024-12-02T14:10:12,647 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43864 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43864 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:12,648 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:12,650 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43866 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741858_1041 to mirror 127.0.0.1:34991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:12,650 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34991 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:12,651 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43866 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T14:10:12,651 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:12,651 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741858_1041 2024-12-02T14:10:12,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43866 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43866 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:12,652 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:12,653 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:12,654 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:12,654 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741859_1042 2024-12-02T14:10:12,655 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] 2024-12-02T14:10:12,657 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42087 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:12,657 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43872 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741860_1043 to mirror 127.0.0.1:42087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:12,658 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:12,658 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43872 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T14:10:12,658 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741860_1043 2024-12-02T14:10:12,658 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43872 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43872 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:12,658 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:12,659 WARN [IPC Server handler 2 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:10:12,659 WARN [IPC Server handler 2 on default port 33497 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:10:12,659 WARN [IPC Server handler 2 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:10:12,662 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:12,662 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:12,662 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:12,662 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:12,662 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:12,662 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148610619 with entries=17, filesize=17.07 KB; new WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148612638 2024-12-02T14:10:12,663 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39631:39631)] 2024-12-02T14:10:12,663 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 is not closed yet, will try archiving it next time 
2024-12-02T14:10:12,663 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148610619 is not closed yet, will try archiving it next time 2024-12-02T14:10:12,664 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs/a3a61c9ba14f%2C32833%2C1733148592935.1733148604582 2024-12-02T14:10:12,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741846_1029 (size=17486) 2024-12-02T14:10:12,665 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148608595 to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs/a3a61c9ba14f%2C32833%2C1733148592935.1733148608595 2024-12-02T14:10:12,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] regionserver.HRegion(8855): Flush requested on 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:10:12,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6a07bf265a12f1bc622a306390bdc470 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-02T14:10:12,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/5d8af950a109418aaeb62c54cbec1c0e is 1079, key is tmprow/info:/1733148612695/Put/seqid=0 2024-12-02T14:10:12,706 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:12,707 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 
2024-12-02T14:10:12,707 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741862_1045 2024-12-02T14:10:12,708 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:12,709 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:12,709 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:12,709 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741863_1046 2024-12-02T14:10:12,710 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] 2024-12-02T14:10:12,711 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:12,711 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:12,711 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741864_1047 2024-12-02T14:10:12,712 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:12,714 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34991 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:12,714 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:12,714 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43880 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741865_1048 to mirror 127.0.0.1:34991 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:12,714 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741865_1048 2024-12-02T14:10:12,714 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43880 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:12,715 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43880 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43880 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:12,715 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:12,716 WARN [IPC Server handler 4 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:10:12,716 WARN [IPC Server handler 4 on default port 33497 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:10:12,716 WARN [IPC Server handler 4 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:10:12,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741866_1049 (size=6027) 2024-12-02T14:10:13,020 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:13,065 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 is not closed yet, will try archiving it next time 2024-12-02T14:10:13,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/5d8af950a109418aaeb62c54cbec1c0e 2024-12-02T14:10:13,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/5d8af950a109418aaeb62c54cbec1c0e as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/5d8af950a109418aaeb62c54cbec1c0e 2024-12-02T14:10:13,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/5d8af950a109418aaeb62c54cbec1c0e, entries=1, sequenceid=34, filesize=5.9 K 2024-12-02T14:10:13,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6a07bf265a12f1bc622a306390bdc470 in 448ms, sequenceid=34, compaction requested=true 2024-12-02T14:10:13,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6a07bf265a12f1bc622a306390bdc470: 2024-12-02T14:10:13,145 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-02T14:10:13,145 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:13,145 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a because midkey is the same as first or last row 2024-12-02T14:10:13,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a07bf265a12f1bc622a306390bdc470:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:10:13,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:10:13,145 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:10:13,147 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:10:13,147 DEBUG 
[RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HStore(1541): 6a07bf265a12f1bc622a306390bdc470/info is initiating minor compaction (all files) 2024-12-02T14:10:13,147 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6a07bf265a12f1bc622a306390bdc470/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:10:13,147 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/2c50f4ca87424f6f975788ee5b5ee43a, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/5d8af950a109418aaeb62c54cbec1c0e] into tmpdir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp, totalSize=28.2 K 2024-12-02T14:10:13,148 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2c50f4ca87424f6f975788ee5b5ee43a, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733148606606 2024-12-02T14:10:13,148 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.Compactor(225): Compacting 99a6409b667c40c08bf043f29bc1670a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733148610632 2024-12-02T14:10:13,149 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5d8af950a109418aaeb62c54cbec1c0e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733148612695 2024-12-02T14:10:13,161 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a07bf265a12f1bc622a306390bdc470#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:10:13,161 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/e068f777b41f4561bebe1d9df08f2e00 is 1080, key is row0002/info:/1733148606606/Put/seqid=0 2024-12-02T14:10:13,163 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:13,163 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:13,163 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741867_1050 2024-12-02T14:10:13,164 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:13,165 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:13,165 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:13,165 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741868_1051 2024-12-02T14:10:13,166 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] 2024-12-02T14:10:13,167 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:13,167 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:13,167 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741869_1052 2024-12-02T14:10:13,168 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:13,170 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42087 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:13,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43912 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741870_1053 to mirror 127.0.0.1:42087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:13,170 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:13,170 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741870_1053 2024-12-02T14:10:13,170 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43912 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:13,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:43912 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43912 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:13,171 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:13,171 WARN [IPC Server handler 2 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:10:13,171 WARN [IPC Server handler 2 on default port 33497 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:10:13,171 WARN [IPC Server handler 2 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:10:13,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741871_1054 (size=17994) 2024-12-02T14:10:13,586 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/e068f777b41f4561bebe1d9df08f2e00 as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 2024-12-02T14:10:13,596 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6a07bf265a12f1bc622a306390bdc470/info of 6a07bf265a12f1bc622a306390bdc470 into e068f777b41f4561bebe1d9df08f2e00(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T14:10:13,596 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6a07bf265a12f1bc622a306390bdc470: 2024-12-02T14:10:13,596 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470., storeName=6a07bf265a12f1bc622a306390bdc470/info, priority=13, startTime=1733148613145; duration=0sec 2024-12-02T14:10:13,596 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-02T14:10:13,596 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:13,596 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 because midkey is the same as first or last row 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 because midkey is the same as first or last row 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 because midkey is the same as first or last row 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:10:13,597 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a07bf265a12f1bc622a306390bdc470:info 2024-12-02T14:10:13,943 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] regionserver.HRegion(8855): Flush requested on 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:10:14,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6a07bf265a12f1bc622a306390bdc470 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-02T14:10:14,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/72ef0b45e63f460caf4572be4b3c8c06 is 1079, key is tmprow/info:/1733148614125/Put/seqid=0 2024-12-02T14:10:14,134 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:14,134 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK], DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:14,134 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741872_1055 2024-12-02T14:10:14,135 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:14,136 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:14,136 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK], DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]) is bad. 2024-12-02T14:10:14,136 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741873_1056 2024-12-02T14:10:14,137 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:44871,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK] 2024-12-02T14:10:14,138 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:14,138 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]) is bad. 2024-12-02T14:10:14,138 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741874_1057 2024-12-02T14:10:14,139 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK] 2024-12-02T14:10:14,140 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:14,140 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:14,140 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741875_1058 2024-12-02T14:10:14,141 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:14,141 WARN [IPC Server handler 1 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T14:10:14,142 WARN [IPC Server handler 1 on default port 33497 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T14:10:14,142 WARN [IPC Server handler 1 on default port 33497 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T14:10:14,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741876_1059 (size=6027) 2024-12-02T14:10:14,264 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741841_1024 to 127.0.0.1:42087 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:14,264 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cd7c00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741851_1034 to 127.0.0.1:34991 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:14,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/72ef0b45e63f460caf4572be4b3c8c06 2024-12-02T14:10:14,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/72ef0b45e63f460caf4572be4b3c8c06 as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/72ef0b45e63f460caf4572be4b3c8c06 2024-12-02T14:10:14,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/72ef0b45e63f460caf4572be4b3c8c06, entries=1, sequenceid=45, filesize=5.9 K 2024-12-02T14:10:14,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 6a07bf265a12f1bc622a306390bdc470 in 441ms, sequenceid=45, compaction requested=false 2024-12-02T14:10:14,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6a07bf265a12f1bc622a306390bdc470: 2024-12-02T14:10:14,567 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-02T14:10:14,567 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:14,567 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 because midkey is the same as first or last row 2024-12-02T14:10:14,664 WARN [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-02T14:10:14,664 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:14,755 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:14,758 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:14,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:14,759 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:14,759 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:10:14,759 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39eb7ad6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:14,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aaad1eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:14,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e407f59{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/java.io.tmpdir/jetty-localhost-42379-hadoop-hdfs-3_4_1-tests_jar-_-any-12945280251729317510/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:14,853 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@413b5d87{HTTP/1.1, (http/1.1)}{localhost:42379} 2024-12-02T14:10:14,853 INFO [Time-limited test {}] server.Server(415): Started @124515ms 2024-12-02T14:10:14,854 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:10:14,927 WARN [Thread-985 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:10:14,935 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x693a3f0c6106e1dd with lease ID 0x8f6b40ac3caacf0a: from storage DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c node DatanodeRegistration(127.0.0.1:39105, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=45703, infoSecurePort=0, ipcPort=42839, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:14,937 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x693a3f0c6106e1dd with lease ID 0x8f6b40ac3caacf0a: from storage DS-9694b8e1-55bb-4022-bfe4-12f1205bf9ac node DatanodeRegistration(127.0.0.1:39105, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=45703, infoSecurePort=0, ipcPort=42839, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299), blocks: 6, hasStaleStorage: false, processing time: 3 msecs, invalidatedBlocks: 0 2024-12-02T14:10:15,021 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:15,265 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741846_1029 to 127.0.0.1:42087 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:15,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741856_1039 (size=12506) 2024-12-02T14:10:15,943 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:16,665 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:17,021 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:17,266 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3cd7c00[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741866_1049 to 127.0.0.1:34991 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:17,266 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741871_1054 to 127.0.0.1:42087 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:17,943 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:18,263 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741876_1059 to 127.0.0.1:34991 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:18,665 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:19,022 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:19,945 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:20,666 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:21,023 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:21,945 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:22,666 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:22,865 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T14:10:23,023 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:23,115 ERROR [FSHLog-0-hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData-prefix:a3a61c9ba14f,37979,1733148592890 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:23,116 WARN [FSHLog-0-hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData-prefix:a3a61c9ba14f,37979,1733148592890 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:23,116 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C37979%2C1733148592890:(num 1733148593043) roll requested 2024-12-02T14:10:23,116 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C37979%2C1733148592890.1733148623116 2024-12-02T14:10:23,120 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:23,121 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK], DatanodeInfoWithStorage[127.0.0.1:34991,DS-7b4795f8-0469-4438-b5ec-1bbfb07e4867,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]) is bad. 2024-12-02T14:10:23,121 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741877_1060 2024-12-02T14:10:23,122 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK] 2024-12-02T14:10:23,129 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:23,129 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:23,129 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:23,129 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:23,129 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:23,130 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148623116 2024-12-02T14:10:23,130 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:23,130 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:23,130 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 2024-12-02T14:10:23,130 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39631:39631),(127.0.0.1/127.0.0.1:45703:45703)] 2024-12-02T14:10:23,130 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 is not closed yet, will try archiving it next time 2024-12-02T14:10:23,131 WARN [IPC Server handler 2 on default port 33497 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 has not been closed. Lease recovery is in progress. RecoveryId = 1062 for block blk_1073741830_1014 2024-12-02T14:10:23,131 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 after 1ms 2024-12-02T14:10:23,946 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:24,666 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:24,952 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1c05b831 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:37515,null,null]) java.net.ConnectException: Call From a3a61c9ba14f/172.17.0.2 to localhost:36131 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T14:10:24,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741833_1019 (size=455) 2024-12-02T14:10:25,628 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs/a3a61c9ba14f%2C32833%2C1733148592935.1733148593346 2024-12-02T14:10:25,631 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148610619 to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs/a3a61c9ba14f%2C32833%2C1733148592935.1733148610619 2024-12-02T14:10:25,934 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@35630ada[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39105, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=45703, infoSecurePort=0, ipcPort=42839, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741833_1019 to 127.0.0.1:42087 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:25,946 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:26,667 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:27,134 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 after 4004ms 2024-12-02T14:10:27,947 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:28,668 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:28,935 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2dd3469e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39105, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=45703, infoSecurePort=0, ipcPort=42839, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741832_1008 to 127.0.0.1:42087 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:28,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741836_1012 (size=76) 2024-12-02T14:10:29,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:10:29,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:10:29,947 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,608 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.1733148630607 2024-12-02T14:10:30,616 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42087 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:30,616 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1576990072_22 at /127.0.0.1:44624 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741879_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741879_1063 to mirror 127.0.0.1:42087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:30,617 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741879_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:30,617 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741879_1063 2024-12-02T14:10:30,617 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1576990072_22 at /127.0.0.1:44624 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741879_1063] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-02T14:10:30,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1576990072_22 at /127.0.0.1:44624 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741879_1063] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44624 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:30,618 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:30,627 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,627 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,627 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,627 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,627 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,628 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148612638 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148630607 2024-12-02T14:10:30,629 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39631:39631),(127.0.0.1/127.0.0.1:45703:45703)] 2024-12-02T14:10:30,629 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148612638 is not closed yet, will try archiving it next time 2024-12-02T14:10:30,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741861_1044 (size=13591) 2024-12-02T14:10:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32833 {}] regionserver.HRegion(8855): Flush requested on 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:10:30,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 6a07bf265a12f1bc622a306390bdc470 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-02T14:10:30,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/b68f5a3880ca48c1a368103e9d11277b is 1080, key is row0013/info:/1733148630631/Put/seqid=0 2024-12-02T14:10:30,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741881_1065 (size=11421) 2024-12-02T14:10:30,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741881_1065 (size=11421) 2024-12-02T14:10:30,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), 
to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/b68f5a3880ca48c1a368103e9d11277b 2024-12-02T14:10:30,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/b68f5a3880ca48c1a368103e9d11277b as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/b68f5a3880ca48c1a368103e9d11277b 2024-12-02T14:10:30,668 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-02T14:10:30,669 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/b68f5a3880ca48c1a368103e9d11277b, entries=6, sequenceid=55, filesize=11.2 K 2024-12-02T14:10:30,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 6a07bf265a12f1bc622a306390bdc470 in 29ms, sequenceid=55, compaction requested=true 2024-12-02T14:10:30,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 6a07bf265a12f1bc622a306390bdc470: 2024-12-02T14:10:30,671 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-02T14:10:30,671 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:30,671 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 because midkey is the same as first or last row 2024-12-02T14:10:30,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6a07bf265a12f1bc622a306390bdc470:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:10:30,672 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-12-02T14:10:30,672 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:10:30,673 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:10:30,673 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HStore(1541): 6a07bf265a12f1bc622a306390bdc470/info is initiating minor compaction (all files) 2024-12-02T14:10:30,673 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6a07bf265a12f1bc622a306390bdc470/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:10:30,673 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/72ef0b45e63f460caf4572be4b3c8c06, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/b68f5a3880ca48c1a368103e9d11277b] into tmpdir=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp, totalSize=34.6 K 2024-12-02T14:10:30,674 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.Compactor(225): Compacting e068f777b41f4561bebe1d9df08f2e00, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733148606606 2024-12-02T14:10:30,675 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.Compactor(225): Compacting 72ef0b45e63f460caf4572be4b3c8c06, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733148614125 2024-12-02T14:10:30,675 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] compactions.Compactor(225): Compacting b68f5a3880ca48c1a368103e9d11277b, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733148614536 2024-12-02T14:10:30,691 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6a07bf265a12f1bc622a306390bdc470#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:10:30,692 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/136ffa1924d34daeb8f992966754ce26 is 1080, key is row0002/info:/1733148606606/Put/seqid=0 2024-12-02T14:10:30,694 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,694 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK], DatanodeInfoWithStorage[127.0.0.1:39105,DS-f58e678d-58a3-4401-ba32-c9cf27e35b2c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:30,694 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741882_1066 2024-12-02T14:10:30,694 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:30,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741883_1067 (size=23502) 2024-12-02T14:10:30,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741883_1067 (size=23502) 2024-12-02T14:10:30,707 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/136ffa1924d34daeb8f992966754ce26 as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/136ffa1924d34daeb8f992966754ce26 2024-12-02T14:10:30,717 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 6a07bf265a12f1bc622a306390bdc470/info of 6a07bf265a12f1bc622a306390bdc470 into 136ffa1924d34daeb8f992966754ce26(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T14:10:30,717 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6a07bf265a12f1bc622a306390bdc470: 2024-12-02T14:10:30,717 INFO [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470., storeName=6a07bf265a12f1bc622a306390bdc470/info, priority=13, startTime=1733148630672; duration=0sec 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/136ffa1924d34daeb8f992966754ce26 because midkey is the same as first or last row 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/136ffa1924d34daeb8f992966754ce26 because midkey is the same as first or last row 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/136ffa1924d34daeb8f992966754ce26 because midkey is the same as first or last row 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:10:30,718 DEBUG [RS:0;a3a61c9ba14f:32833-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6a07bf265a12f1bc622a306390bdc470:info 2024-12-02T14:10:30,859 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T14:10:30,860 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T14:10:30,860 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:10:30,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:30,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:30,860 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T14:10:30,861 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T14:10:30,861 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=120022351, stopped=false 2024-12-02T14:10:30,861 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a3a61c9ba14f,37979,1733148592890 2024-12-02T14:10:30,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:10:30,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:10:30,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:10:30,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:30,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:30,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:30,863 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:10:30,864 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T14:10:30,864 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:10:30,864 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:30,865 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,32833,1733148592935' ***** 2024-12-02T14:10:30,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:10:30,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:10:30,865 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T14:10:30,865 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,36269,1733148593887' ***** 2024-12-02T14:10:30,865 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:10:30,865 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T14:10:30,866 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:10:30,866 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:10:30,866 INFO [RS:0;a3a61c9ba14f:32833 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T14:10:30,866 INFO [RS:0;a3a61c9ba14f:32833 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T14:10:30,866 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:10:30,866 INFO [RS:1;a3a61c9ba14f:36269 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T14:10:30,866 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(3091): Received CLOSE for 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:10:30,866 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:10:30,866 INFO [RS:1;a3a61c9ba14f:36269 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T14:10:30,866 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,36269,1733148593887 2024-12-02T14:10:30,866 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:10:30,866 INFO [RS:1;a3a61c9ba14f:36269 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a3a61c9ba14f:36269. 
2024-12-02T14:10:30,866 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,32833,1733148592935 2024-12-02T14:10:30,867 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:10:30,867 DEBUG [RS:1;a3a61c9ba14f:36269 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:10:30,867 DEBUG [RS:1;a3a61c9ba14f:36269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:30,867 INFO [RS:0;a3a61c9ba14f:32833 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a3a61c9ba14f:32833. 2024-12-02T14:10:30,867 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,36269,1733148593887; all regions closed. 
2024-12-02T14:10:30,867 DEBUG [RS:0;a3a61c9ba14f:32833 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:10:30,867 DEBUG [RS:0;a3a61c9ba14f:32833 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:30,867 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:10:30,867 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:10:30,867 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6a07bf265a12f1bc622a306390bdc470, disabling compactions & flushes 2024-12-02T14:10:30,867 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T14:10:30,867 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:10:30,867 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:10:30,867 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:10:30,867 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. after waiting 0 ms 2024-12-02T14:10:30,867 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,867 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 
2024-12-02T14:10:30,868 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,868 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T14:10:30,868 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 6a07bf265a12f1bc622a306390bdc470 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-12-02T14:10:30,868 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1325): Online Regions={6a07bf265a12f1bc622a306390bdc470=TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T14:10:30,868 DEBUG [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6a07bf265a12f1bc622a306390bdc470 2024-12-02T14:10:30,868 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:10:30,868 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,868 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:10:30,868 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:10:30,868 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:10:30,868 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:10:30,868 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,868 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-02T14:10:30,868 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,869 ERROR [FSHLog-0-hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b-prefix:a3a61c9ba14f,32833,1733148592935.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,869 WARN [FSHLog-0-hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b-prefix:a3a61c9ba14f,32833,1733148592935.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,869 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C32833%2C1733148592935.meta:.meta(num 1733148593746) roll requested 2024-12-02T14:10:30,869 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,869 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,869 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 2024-12-02T14:10:30,869 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148630869.meta 2024-12-02T14:10:30,870 WARN [IPC Server handler 3 on default port 33497 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 has not been closed. Lease recovery is in progress. 
RecoveryId = 1068 for block blk_1073741837_1013 2024-12-02T14:10:30,870 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 after 1ms 2024-12-02T14:10:30,872 WARN [Thread-1041 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,873 WARN [Thread-1041 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741884_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK], DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 
2024-12-02T14:10:30,873 WARN [Thread-1041 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741884_1069 2024-12-02T14:10:30,873 WARN [Thread-1041 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:30,874 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/cd8445254c68407fa077e92d8d6314b8 is 1080, key is row0018/info:/1733148630644/Put/seqid=0 2024-12-02T14:10:30,881 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,881 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,881 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,881 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,881 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:30,881 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148630869.meta 2024-12-02T14:10:30,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741886_1071 (size=11421) 2024-12-02T14:10:30,882 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741886_1071 (size=11421) 2024-12-02T14:10:30,882 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37515,DS-b3ac9365-de98-4e21-a500-26ee96b24634,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,882 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta 2024-12-02T14:10:30,882 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/cd8445254c68407fa077e92d8d6314b8 2024-12-02T14:10:30,882 WARN [IPC Server handler 2 on default port 33497 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741834_1010 2024-12-02T14:10:30,883 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta after 1ms 2024-12-02T14:10:30,884 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39631:39631),(127.0.0.1/127.0.0.1:45703:45703)] 2024-12-02T14:10:30,884 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta is not closed yet, will try archiving it next time 2024-12-02T14:10:30,889 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/.tmp/info/cd8445254c68407fa077e92d8d6314b8 as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/cd8445254c68407fa077e92d8d6314b8 2024-12-02T14:10:30,894 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/cd8445254c68407fa077e92d8d6314b8, entries=6, sequenceid=65, filesize=11.2 K 2024-12-02T14:10:30,895 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6a07bf265a12f1bc622a306390bdc470 in 28ms, sequenceid=65, compaction requested=false 2024-12-02T14:10:30,896 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/2c50f4ca87424f6f975788ee5b5ee43a, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/5d8af950a109418aaeb62c54cbec1c0e, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/72ef0b45e63f460caf4572be4b3c8c06, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/b68f5a3880ca48c1a368103e9d11277b] to archive 2024-12-02T14:10:30,897 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T14:10:30,899 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/2c50f4ca87424f6f975788ee5b5ee43a to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/2c50f4ca87424f6f975788ee5b5ee43a 2024-12-02T14:10:30,900 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/99a6409b667c40c08bf043f29bc1670a 2024-12-02T14:10:30,901 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/e068f777b41f4561bebe1d9df08f2e00 2024-12-02T14:10:30,903 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/5d8af950a109418aaeb62c54cbec1c0e to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/5d8af950a109418aaeb62c54cbec1c0e 2024-12-02T14:10:30,904 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/72ef0b45e63f460caf4572be4b3c8c06 to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/72ef0b45e63f460caf4572be4b3c8c06 2024-12-02T14:10:30,905 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/b68f5a3880ca48c1a368103e9d11277b to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/info/b68f5a3880ca48c1a368103e9d11277b 2024-12-02T14:10:30,906 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a3a61c9ba14f:37979 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-02T14:10:30,906 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [2c50f4ca87424f6f975788ee5b5ee43a=10347, 99a6409b667c40c08bf043f29bc1670a=12506, e068f777b41f4561bebe1d9df08f2e00=17994, 5d8af950a109418aaeb62c54cbec1c0e=6027, 72ef0b45e63f460caf4572be4b3c8c06=6027, b68f5a3880ca48c1a368103e9d11277b=11421] 2024-12-02T14:10:30,907 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/info/8257871f8aa44ea589700de94088eefb is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470./info:regioninfo/1733148594348/Put/seqid=0 2024-12-02T14:10:30,911 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/6a07bf265a12f1bc622a306390bdc470/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-12-02T14:10:30,911 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1073 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42087 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,911 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:44714 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741887_1073] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741887_1073 to mirror 127.0.0.1:42087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:30,911 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741887_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:30,911 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:44714 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741887_1073] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:30,911 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741887_1073 2024-12-02T14:10:30,911 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:44714 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741887_1073] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44714 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:30,912 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:30,912 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 
2024-12-02T14:10:30,912 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6a07bf265a12f1bc622a306390bdc470: Waiting for close lock at 1733148630867Running coprocessor pre-close hooks at 1733148630867Disabling compacts and flushes for region at 1733148630867Disabling writes for close at 1733148630867Obtaining lock to block concurrent updates at 1733148630868 (+1 ms)Preparing flush snapshotting stores in 6a07bf265a12f1bc622a306390bdc470 at 1733148630868Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1733148630868Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. at 1733148630869 (+1 ms)Flushing 6a07bf265a12f1bc622a306390bdc470/info: creating writer at 1733148630869Flushing 6a07bf265a12f1bc622a306390bdc470/info: appending metadata at 1733148630873 (+4 ms)Flushing 6a07bf265a12f1bc622a306390bdc470/info: closing flushed file at 1733148630873Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a923fde: reopening flushed file at 1733148630888 (+15 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6a07bf265a12f1bc622a306390bdc470 in 28ms, sequenceid=65, compaction requested=false at 1733148630895 (+7 ms)Writing region close event to WAL at 1733148630907 (+12 ms)Running coprocessor post-close hooks at 1733148630912 (+5 ms)Closed at 1733148630912 2024-12-02T14:10:30,912 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733148593968.6a07bf265a12f1bc622a306390bdc470. 2024-12-02T14:10:30,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741888_1074 (size=7089) 2024-12-02T14:10:30,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741888_1074 (size=7089) 2024-12-02T14:10:30,917 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/info/8257871f8aa44ea589700de94088eefb 2024-12-02T14:10:30,942 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/ns/f634aa55a6ff4f2ab05faafb7e7a076c is 43, key is default/ns:d/1733148593791/Put/seqid=0 2024-12-02T14:10:30,946 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:42087 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:30,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:44732 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741889_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6]'}, localName='127.0.0.1:33603', datanodeUuid='0465bcc1-b8c6-499b-b84f-3f7380979272', xmitsInProgress=0}:Exception transferring block BP-272717943-172.17.0.2-1733148592299:blk_1073741889_1075 to mirror 127.0.0.1:42087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:30,946 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-272717943-172.17.0.2-1733148592299:blk_1073741889_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33603,DS-2b14f9a5-fca6-48a4-ad9c-332875addcf8,DISK], DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK]) is bad. 2024-12-02T14:10:30,946 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-272717943-172.17.0.2-1733148592299:blk_1073741889_1075 2024-12-02T14:10:30,946 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:44732 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741889_1075] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-02T14:10:30,946 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1513329779_22 at /127.0.0.1:44732 [Receiving block BP-272717943-172.17.0.2-1733148592299:blk_1073741889_1075] {}] datanode.DataXceiver(331): 127.0.0.1:33603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44732 dst: /127.0.0.1:33603 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:30,947 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:42087,DS-695eb987-1cce-4381-8f57-7efc92958691,DISK] 2024-12-02T14:10:30,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741890_1076 (size=5153) 2024-12-02T14:10:30,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741890_1076 (size=5153) 2024-12-02T14:10:30,952 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/ns/f634aa55a6ff4f2ab05faafb7e7a076c 2024-12-02T14:10:30,970 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/table/6f583dc167a342c98f842101736f3201 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733148594358/Put/seqid=0 2024-12-02T14:10:30,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741891_1077 (size=5424) 2024-12-02T14:10:30,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741891_1077 (size=5424) 2024-12-02T14:10:30,976 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/table/6f583dc167a342c98f842101736f3201 2024-12-02T14:10:30,982 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T14:10:30,982 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T14:10:30,983 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/info/8257871f8aa44ea589700de94088eefb as 
hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/info/8257871f8aa44ea589700de94088eefb 2024-12-02T14:10:30,989 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/info/8257871f8aa44ea589700de94088eefb, entries=10, sequenceid=11, filesize=6.9 K 2024-12-02T14:10:30,990 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/ns/f634aa55a6ff4f2ab05faafb7e7a076c as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/ns/f634aa55a6ff4f2ab05faafb7e7a076c 2024-12-02T14:10:30,996 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/ns/f634aa55a6ff4f2ab05faafb7e7a076c, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T14:10:30,997 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/.tmp/table/6f583dc167a342c98f842101736f3201 as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/table/6f583dc167a342c98f842101736f3201 2024-12-02T14:10:31,003 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/table/6f583dc167a342c98f842101736f3201, entries=2, sequenceid=11, filesize=5.3 K 2024-12-02T14:10:31,004 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false 2024-12-02T14:10:31,010 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T14:10:31,010 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:10:31,011 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:10:31,011 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148630868Running coprocessor pre-close hooks at 1733148630868Disabling compacts and flushes for region at 1733148630868Disabling writes for close at 1733148630868Obtaining lock to block concurrent updates at 1733148630868Preparing flush snapshotting stores in 1588230740 at 1733148630868Finished 
memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733148630869 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733148630885 (+16 ms)Flushing 1588230740/info: creating writer at 1733148630885Flushing 1588230740/info: appending metadata at 1733148630907 (+22 ms)Flushing 1588230740/info: closing flushed file at 1733148630907Flushing 1588230740/ns: creating writer at 1733148630923 (+16 ms)Flushing 1588230740/ns: appending metadata at 1733148630942 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1733148630942Flushing 1588230740/table: creating writer at 1733148630957 (+15 ms)Flushing 1588230740/table: appending metadata at 1733148630970 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733148630970Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@743af329: reopening flushed file at 1733148630982 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18f33225: reopening flushed file at 1733148630989 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a2baf12: reopening flushed file at 1733148630996 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 136ms, sequenceid=11, compaction requested=false at 1733148631004 (+8 ms)Writing region close event to WAL at 1733148631006 (+2 ms)Running coprocessor post-close hooks at 1733148631010 (+4 ms)Closed at 1733148631011 (+1 ms) 2024-12-02T14:10:31,011 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:10:31,031 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.1733148612638 to hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs/a3a61c9ba14f%2C32833%2C1733148592935.1733148612638 2024-12-02T14:10:31,068 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,32833,1733148592935; all regions closed. 
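
Note: the flush/commit DEBUG lines above trace the write path for hbase:meta — each family's cells are flushed to an HFile under the region's .tmp directory, then committed into the family directory ("Committing ... as ...") and recorded with entry count, sequence id and file size. The sketch below only illustrates that commit step with a plain HDFS rename, assuming hadoop-common on the classpath; the class name and paths are placeholders, and the real HRegionFileSystem.commitStoreFile performs additional validation beyond this.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Simplified illustration (not HBase's implementation) of the ".tmp -> family dir"
    // commit recorded by the HRegionFileSystem DEBUG lines above. Placeholder paths only.
    public class CommitStoreFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();   // picks up fs.defaultFS from core-site.xml if present
        FileSystem fs = FileSystem.get(conf);
        Path tmpFile = new Path("/data/hbase/meta/REGION/.tmp/HFILE");   // where the flusher writes first
        Path storeFile = new Path("/data/hbase/meta/REGION/info/HFILE"); // committed location in the 'info' family
        if (!fs.rename(tmpFile, storeFile)) {       // rename is atomic within a single HDFS namespace
          throw new java.io.IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
      }
    }
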
2024-12-02T14:10:31,069 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:31,069 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:31,069 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:31,069 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:31,069 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:31,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741885_1070 (size=825) 2024-12-02T14:10:31,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741885_1070 (size=825) 2024-12-02T14:10:31,194 INFO [regionserver/a3a61c9ba14f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:10:31,256 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T14:10:31,256 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T14:10:31,936 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2dd3469e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39105, datanodeUuid=67b633d0-c428-40af-9668-81c34e41e2e9, infoPort=45703, infoSecurePort=0, ipcPort=42839, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741827_1003 to 127.0.0.1:42087 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:31,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:10:31,942 INFO [regionserver/a3a61c9ba14f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:10:32,268 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@d0a93d1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33603, datanodeUuid=0465bcc1-b8c6-499b-b84f-3f7380979272, infoPort=39631, infoSecurePort=0, ipcPort=35877, storageInfo=lv=-57;cid=testClusterID;nsid=996656154;c=1733148592299):Failed to transfer BP-272717943-172.17.0.2-1733148592299:blk_1073741861_1044 to 127.0.0.1:42087 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:32,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:10:34,421 INFO [master/a3a61c9ba14f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T14:10:34,421 INFO [master/a3a61c9ba14f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T14:10:34,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-02T14:10:34,726 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:10:34,726 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T14:10:34,872 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 after 4003ms 2024-12-02T14:10:34,884 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta after 4002ms 2024-12-02T14:10:34,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741835_1011 (size=393) 2024-12-02T14:10:34,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:10:35,870 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-02T14:10:35,876 DEBUG [RS:1;a3a61c9ba14f:36269 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs 2024-12-02T14:10:35,876 INFO [RS:1;a3a61c9ba14f:36269 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C36269%2C1733148593887:(num 1733148594073) 2024-12-02T14:10:35,876 DEBUG [RS:1;a3a61c9ba14f:36269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:35,876 INFO [RS:1;a3a61c9ba14f:36269 {}] 
regionserver.LeaseManager(133): Closed leases 2024-12-02T14:10:35,877 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:10:35,877 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:10:35,878 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:10:35,878 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:10:35,878 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:10:35,878 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T14:10:35,878 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:10:35,879 INFO [RS:1;a3a61c9ba14f:36269 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36269 2024-12-02T14:10:35,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:10:35,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,36269,1733148593887 2024-12-02T14:10:35,881 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:10:35,882 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,36269,1733148593887] 2024-12-02T14:10:35,883 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,36269,1733148593887 already deleted, retry=false 2024-12-02T14:10:35,883 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,36269,1733148593887 expired; onlineServers=1 2024-12-02T14:10:35,913 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,933 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:35,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:10:35,982 INFO [RS:1;a3a61c9ba14f:36269 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:10:35,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36269-0x1009b4427530002, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:10:35,982 INFO [RS:1;a3a61c9ba14f:36269 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,36269,1733148593887; zookeeper connection closed. 2024-12-02T14:10:35,983 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@334b1885 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@334b1885 2024-12-02T14:10:36,070 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-02T14:10:36,079 DEBUG [RS:0;a3a61c9ba14f:32833 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs 2024-12-02T14:10:36,079 INFO [RS:0;a3a61c9ba14f:32833 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C32833%2C1733148592935.meta:.meta(num 1733148630869) 2024-12-02T14:10:36,080 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,080 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,080 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,080 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,080 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741880_1064 (size=15140) 2024-12-02T14:10:36,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741880_1064 (size=15140) 2024-12-02T14:10:36,085 DEBUG [RS:0;a3a61c9ba14f:32833 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/oldWALs 2024-12-02T14:10:36,085 INFO [RS:0;a3a61c9ba14f:32833 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C32833%2C1733148592935:(num 1733148630607) 2024-12-02T14:10:36,085 DEBUG [RS:0;a3a61c9ba14f:32833 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:36,085 INFO [RS:0;a3a61c9ba14f:32833 {}] 
regionserver.LeaseManager(133): Closed leases 2024-12-02T14:10:36,085 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:10:36,085 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:10:36,085 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:10:36,085 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:10:36,085 INFO [RS:0;a3a61c9ba14f:32833 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32833 2024-12-02T14:10:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,32833,1733148592935 2024-12-02T14:10:36,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:10:36,087 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:10:36,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,32833,1733148592935] 2024-12-02T14:10:36,089 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,32833,1733148592935 already deleted, retry=false 2024-12-02T14:10:36,089 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,32833,1733148592935 expired; onlineServers=0 2024-12-02T14:10:36,089 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a3a61c9ba14f,37979,1733148592890' ***** 2024-12-02T14:10:36,089 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T14:10:36,089 INFO [M:0;a3a61c9ba14f:37979 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:10:36,089 INFO [M:0;a3a61c9ba14f:37979 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:10:36,089 DEBUG [M:0;a3a61c9ba14f:37979 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T14:10:36,089 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
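
Note: the WAL-Shutdown-0 ERROR a little further up names a concrete knob for the 5-second close wait, "hbase.wal.fshlog.wait.on.shutdown.seconds". A minimal sketch of raising it programmatically follows; the key is quoted verbatim from that message, while the value of 15 and the HBaseConfiguration route are illustrative assumptions (the same key can equally be set in hbase-site.xml).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      // Returns a configuration with a longer WAL close wait than the 5 s seen in the log above.
      public static Configuration withLongerWalShutdownWait() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 15); // assumed value, for illustration
        return conf;
      }
    }
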
2024-12-02T14:10:36,089 DEBUG [M:0;a3a61c9ba14f:37979 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:10:36,089 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148593121 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148593121,5,FailOnTimeoutGroup] 2024-12-02T14:10:36,089 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148593118 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148593118,5,FailOnTimeoutGroup] 2024-12-02T14:10:36,090 INFO [M:0;a3a61c9ba14f:37979 {}] hbase.ChoreService(370): Chore service for: master/a3a61c9ba14f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:10:36,090 INFO [M:0;a3a61c9ba14f:37979 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:10:36,090 DEBUG [M:0;a3a61c9ba14f:37979 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:10:36,090 INFO [M:0;a3a61c9ba14f:37979 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:10:36,090 INFO [M:0;a3a61c9ba14f:37979 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:10:36,090 INFO [M:0;a3a61c9ba14f:37979 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:10:36,091 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T14:10:36,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:10:36,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:36,091 DEBUG [M:0;a3a61c9ba14f:37979 {}] zookeeper.ZKUtil(347): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:10:36,091 WARN [M:0;a3a61c9ba14f:37979 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:10:36,092 INFO [M:0;a3a61c9ba14f:37979 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/.lastflushedseqids 2024-12-02T14:10:36,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741892_1078 (size=130) 2024-12-02T14:10:36,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741892_1078 (size=130) 2024-12-02T14:10:36,098 INFO [M:0;a3a61c9ba14f:37979 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:10:36,098 INFO [M:0;a3a61c9ba14f:37979 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:10:36,099 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:10:36,099 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:10:36,099 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:10:36,099 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:10:36,099 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:10:36,099 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-12-02T14:10:36,114 DEBUG [M:0;a3a61c9ba14f:37979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/11404bc718f748e9ab400545507b4237 is 82, key is hbase:meta,,1/info:regioninfo/1733148593777/Put/seqid=0 2024-12-02T14:10:36,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741893_1079 (size=5672) 2024-12-02T14:10:36,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741893_1079 (size=5672) 2024-12-02T14:10:36,120 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/11404bc718f748e9ab400545507b4237 2024-12-02T14:10:36,141 DEBUG [M:0;a3a61c9ba14f:37979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/990b7f63bc684e0682d270549120baa4 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733148594363/Put/seqid=0 2024-12-02T14:10:36,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741894_1080 (size=6254) 2024-12-02T14:10:36,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741894_1080 (size=6254) 2024-12-02T14:10:36,146 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/990b7f63bc684e0682d270549120baa4 2024-12-02T14:10:36,151 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 990b7f63bc684e0682d270549120baa4 2024-12-02T14:10:36,166 DEBUG [M:0;a3a61c9ba14f:37979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1e2b3b37a0f42399702d9475547b1c2 is 69, key is a3a61c9ba14f,32833,1733148592935/rs:state/1733148593177/Put/seqid=0 2024-12-02T14:10:36,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741895_1081 (size=5224) 2024-12-02T14:10:36,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741895_1081 (size=5224) 2024-12-02T14:10:36,171 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1e2b3b37a0f42399702d9475547b1c2 2024-12-02T14:10:36,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:10:36,188 INFO [RS:0;a3a61c9ba14f:32833 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:10:36,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32833-0x1009b4427530001, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:10:36,188 INFO [RS:0;a3a61c9ba14f:32833 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,32833,1733148592935; zookeeper connection closed. 2024-12-02T14:10:36,188 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@64db250a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@64db250a 2024-12-02T14:10:36,188 DEBUG [M:0;a3a61c9ba14f:37979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e3155343a61e4aee80aa14eec8a76e6a is 52, key is load_balancer_on/state:d/1733148593872/Put/seqid=0 2024-12-02T14:10:36,188 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-02T14:10:36,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741896_1082 (size=5056) 2024-12-02T14:10:36,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741896_1082 (size=5056) 2024-12-02T14:10:36,194 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e3155343a61e4aee80aa14eec8a76e6a 2024-12-02T14:10:36,199 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/11404bc718f748e9ab400545507b4237 as 
hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/11404bc718f748e9ab400545507b4237 2024-12-02T14:10:36,206 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/11404bc718f748e9ab400545507b4237, entries=8, sequenceid=60, filesize=5.5 K 2024-12-02T14:10:36,207 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/990b7f63bc684e0682d270549120baa4 as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/990b7f63bc684e0682d270549120baa4 2024-12-02T14:10:36,214 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 990b7f63bc684e0682d270549120baa4 2024-12-02T14:10:36,214 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/990b7f63bc684e0682d270549120baa4, entries=6, sequenceid=60, filesize=6.1 K 2024-12-02T14:10:36,215 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f1e2b3b37a0f42399702d9475547b1c2 as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1e2b3b37a0f42399702d9475547b1c2 2024-12-02T14:10:36,220 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f1e2b3b37a0f42399702d9475547b1c2, entries=2, sequenceid=60, filesize=5.1 K 2024-12-02T14:10:36,221 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e3155343a61e4aee80aa14eec8a76e6a as hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e3155343a61e4aee80aa14eec8a76e6a 2024-12-02T14:10:36,226 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e3155343a61e4aee80aa14eec8a76e6a, entries=1, sequenceid=60, filesize=4.9 K 2024-12-02T14:10:36,227 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false 2024-12-02T14:10:36,228 INFO [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
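
Note: the JVMClusterUtil line above ("Shutdown of 1 master(s) and 2 regionserver(s) complete") and the SingleProcessHBaseCluster shutdown hooks come from the mini-cluster test harness driving this run. A hedged sketch of that lifecycle follows, assuming the hbase-testing-util artifact; the helper is HBaseTestingUtil on the 3.0 line in use here (HBaseTestingUtility in 2.x), and the regionserver count mirrors this test's cluster.

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(2);       // 1 master + 2 regionservers, as in this log
        try {
          // test body, e.g. roll WALs while a datanode is stopped
        } finally {
          util.shutdownMiniCluster();   // produces the master/regionserver stop sequence logged above
        }
      }
    }
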
2024-12-02T14:10:36,228 DEBUG [M:0;a3a61c9ba14f:37979 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148636099Disabling compacts and flushes for region at 1733148636099Disabling writes for close at 1733148636099Obtaining lock to block concurrent updates at 1733148636099Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733148636099Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1733148636099Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733148636100 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733148636100Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733148636113 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733148636113Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733148636126 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733148636140 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733148636140Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733148636151 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733148636166 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733148636166Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733148636175 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733148636188 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733148636188Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ef537f9: reopening flushed file at 1733148636198 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@753e8583: reopening flushed file at 1733148636206 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7caa63c5: reopening flushed file at 1733148636214 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@735fbdb9: reopening flushed file at 1733148636220 (+6 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=60, compaction requested=false at 1733148636227 (+7 ms)Writing region close event to WAL at 1733148636228 (+1 ms)Closed at 1733148636228 2024-12-02T14:10:36,229 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,229 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,229 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,229 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,229 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:36,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39105 is added to blk_1073741878_1061 (size=1045) 2024-12-02T14:10:36,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33603 is added to blk_1073741878_1061 (size=1045) 2024-12-02T14:10:36,437 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:10:36,455 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,459 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:10:36,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:36,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:37,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:37,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:38,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:38,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:39,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:39,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:40,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:40,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:40,940 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-272717943-172.17.0.2-1733148592299:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:37515,null,null]) java.net.ConnectException: Call From a3a61c9ba14f/172.17.0.2 to localhost:36131 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T14:10:41,229 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-02T14:10:41,230 INFO [M:0;a3a61c9ba14f:37979 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T14:10:41,230 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:10:41,230 INFO [M:0;a3a61c9ba14f:37979 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37979 2024-12-02T14:10:41,230 INFO [M:0;a3a61c9ba14f:37979 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:10:41,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:10:41,331 INFO [M:0;a3a61c9ba14f:37979 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:10:41,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37979-0x1009b4427530000, quorum=127.0.0.1:64260, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:10:41,336 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e407f59{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:41,337 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@413b5d87{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:41,337 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:41,337 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aaad1eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:41,338 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39eb7ad6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:41,340 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:41,340 ERROR [Command processor {}] 
datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T14:10:41,340 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272717943-172.17.0.2-1733148592299 (Datanode Uuid 67b633d0-c428-40af-9668-81c34e41e2e9) service to localhost/127.0.0.1:33497 2024-12-02T14:10:41,340 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:41,340 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:37515,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:36131 , LocalHost:localPort a3a61c9ba14f/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-02T14:10:41,341 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39105,null,null]) java.io.IOException: No block pool offer service for bpid=BP-272717943-172.17.0.2-1733148592299 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:41,341 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d5faad4 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:37515,null,null], DatanodeInfoWithStorage[127.0.0.1:39105,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-272717943-172.17.0.2-1733148592299:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:37515,null,null], DatanodeInfoWithStorage[127.0.0.1:39105,null,null]] 2024-12-02T14:10:41,341 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data3/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:41,342 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data4/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:41,342 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:41,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44952386{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:41,345 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@57ab428e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:41,345 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:41,345 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65266587{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:41,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22d0350b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:41,346 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:41,346 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:10:41,346 WARN [BP-272717943-172.17.0.2-1733148592299 heartbeating to localhost/127.0.0.1:33497 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-272717943-172.17.0.2-1733148592299 (Datanode Uuid 0465bcc1-b8c6-499b-b84f-3f7380979272) service to localhost/127.0.0.1:33497 2024-12-02T14:10:41,346 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:41,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data5/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:41,347 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/cluster_857325ae-581f-cf4a-4693-2efcaf04d565/data/data6/current/BP-272717943-172.17.0.2-1733148592299 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:41,347 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:41,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@42a4a79c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:10:41,352 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@195100a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:41,352 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:41,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@477187fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:41,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ad82de5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:41,360 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T14:10:41,391 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T14:10:41,399 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33497 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33497 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fc1e0bf65c8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:33497 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33353 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fc1e0bf65c8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:33353 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33497 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.3@localhost:33497 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33497 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33497 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33497 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33497 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007fc1e0bf65c8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33497 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33497 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=440 (was 405) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=44 (was 49), ProcessCount=11 (was 11), AvailableMemoryMB=6168 (was 6747) 2024-12-02T14:10:41,406 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=440, MaxFileDescriptor=1048576, SystemLoadAverage=44, ProcessCount=11, AvailableMemoryMB=6169 2024-12-02T14:10:41,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T14:10:41,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.log.dir so I do NOT create it in target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293 2024-12-02T14:10:41,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d482d05b-6f1d-5e7a-963b-107f51754339/hadoop.tmp.dir so I do NOT create it in target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293 2024-12-02T14:10:41,406 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a, deleteOnExit=true 2024-12-02T14:10:41,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T14:10:41,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/test.cache.data in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T14:10:41,407 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T14:10:41,407 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/nfs.dump.dir in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T14:10:41,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T14:10:41,419 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:10:41,458 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:41,462 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:41,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:41,463 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:41,463 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:10:41,464 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:41,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:41,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:41,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53a4c428{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir/jetty-localhost-33637-hadoop-hdfs-3_4_1-tests_jar-_-any-9637999901090442501/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:10:41,554 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:33637} 2024-12-02T14:10:41,554 INFO [Time-limited test {}] server.Server(415): Started @151217ms 2024-12-02T14:10:41,565 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:10:41,609 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:41,612 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:41,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:41,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:41,613 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:10:41,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:41,614 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:41,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60017892{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir/jetty-localhost-44077-hadoop-hdfs-3_4_1-tests_jar-_-any-6146197991755512118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:41,704 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:44077} 2024-12-02T14:10:41,704 INFO [Time-limited test {}] server.Server(415): Started @151367ms 2024-12-02T14:10:41,706 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:10:41,732 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:41,736 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:41,737 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:41,737 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:41,737 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:10:41,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:41,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:41,765 WARN [Thread-1188 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data1/current/BP-2116755885-172.17.0.2-1733148641423/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:41,765 WARN [Thread-1189 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data2/current/BP-2116755885-172.17.0.2-1733148641423/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:41,782 WARN [Thread-1167 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:10:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9470397c3b645ec with lease ID 0xb31d765e312bc58d: Processing first storage report for DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0 from datanode DatanodeRegistration(127.0.0.1:36889, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=44975, infoSecurePort=0, ipcPort=36351, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423) 2024-12-02T14:10:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9470397c3b645ec with lease ID 0xb31d765e312bc58d: from storage DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0 node DatanodeRegistration(127.0.0.1:36889, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=44975, infoSecurePort=0, ipcPort=36351, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa9470397c3b645ec with lease ID 0xb31d765e312bc58d: Processing first storage report for DS-8672793c-9f2f-4502-9648-a69adbdd0729 from datanode DatanodeRegistration(127.0.0.1:36889, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=44975, infoSecurePort=0, ipcPort=36351, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423) 2024-12-02T14:10:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa9470397c3b645ec with lease ID 0xb31d765e312bc58d: from storage DS-8672793c-9f2f-4502-9648-a69adbdd0729 node DatanodeRegistration(127.0.0.1:36889, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=44975, infoSecurePort=0, ipcPort=36351, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:41,837 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6fab6db5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir/jetty-localhost-44667-hadoop-hdfs-3_4_1-tests_jar-_-any-17764791949847048930/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:41,837 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:44667} 2024-12-02T14:10:41,838 INFO [Time-limited test {}] server.Server(415): Started @151500ms 2024-12-02T14:10:41,839 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:10:41,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:41,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:41,895 WARN [Thread-1214 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data3/current/BP-2116755885-172.17.0.2-1733148641423/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:41,895 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data4/current/BP-2116755885-172.17.0.2-1733148641423/current, will proceed with Du for space computation calculation, 2024-12-02T14:10:41,913 WARN [Thread-1203 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:10:41,915 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa1b2456ec4574ab5 with lease ID 0xb31d765e312bc58e: Processing first storage report for DS-5001f710-acce-4575-9a21-435fcd93ab13 from datanode DatanodeRegistration(127.0.0.1:41973, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=33571, infoSecurePort=0, ipcPort=40971, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423) 2024-12-02T14:10:41,916 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1b2456ec4574ab5 with lease ID 0xb31d765e312bc58e: from storage DS-5001f710-acce-4575-9a21-435fcd93ab13 node DatanodeRegistration(127.0.0.1:41973, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=33571, infoSecurePort=0, ipcPort=40971, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:41,916 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa1b2456ec4574ab5 with lease ID 0xb31d765e312bc58e: Processing first storage report for DS-eb5d1eab-833f-471f-8592-87d275379606 from datanode DatanodeRegistration(127.0.0.1:41973, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=33571, infoSecurePort=0, ipcPort=40971, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423) 2024-12-02T14:10:41,916 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1b2456ec4574ab5 with lease ID 0xb31d765e312bc58e: from storage DS-eb5d1eab-833f-471f-8592-87d275379606 node DatanodeRegistration(127.0.0.1:41973, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=33571, infoSecurePort=0, ipcPort=40971, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T14:10:41,961 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293 2024-12-02T14:10:41,963 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/zookeeper_0, clientPort=55590, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T14:10:41,964 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55590 2024-12-02T14:10:41,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:41,966 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:41,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:10:41,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:10:41,976 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40 with version=8 2024-12-02T14:10:41,976 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase-staging 2024-12-02T14:10:41,978 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:10:41,978 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:10:41,978 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:10:41,978 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:10:41,978 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:10:41,978 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:10:41,978 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T14:10:41,978 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:10:41,979 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36523 2024-12-02T14:10:41,980 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36523 connecting to ZooKeeper ensemble=127.0.0.1:55590 2024-12-02T14:10:41,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:365230x0, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:10:41,983 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36523-0x1009b44e7190000 connected 2024-12-02T14:10:42,002 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:42,004 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:42,008 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:10:42,008 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40, hbase.cluster.distributed=false 2024-12-02T14:10:42,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:10:42,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36523 2024-12-02T14:10:42,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36523 2024-12-02T14:10:42,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36523 2024-12-02T14:10:42,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36523 2024-12-02T14:10:42,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36523 2024-12-02T14:10:42,030 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:10:42,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:10:42,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:10:42,031 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:10:42,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:10:42,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:10:42,031 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:10:42,031 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:10:42,032 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42177 2024-12-02T14:10:42,034 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42177 connecting to ZooKeeper ensemble=127.0.0.1:55590 2024-12-02T14:10:42,036 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:42,038 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:42,042 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421770x0, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:10:42,043 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:421770x0, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:10:42,043 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:10:42,045 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42177-0x1009b44e7190001 connected 2024-12-02T14:10:42,049 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:10:42,050 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:10:42,051 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:10:42,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42177 2024-12-02T14:10:42,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42177 2024-12-02T14:10:42,055 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42177 2024-12-02T14:10:42,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42177 2024-12-02T14:10:42,057 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42177 2024-12-02T14:10:42,066 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a3a61c9ba14f:36523 2024-12-02T14:10:42,067 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,068 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:10:42,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:10:42,069 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,070 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:10:42,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,070 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,070 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:10:42,070 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a3a61c9ba14f,36523,1733148641978 from backup master directory 2024-12-02T14:10:42,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,071 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:10:42,071 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
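The entries above show the master (port 36523) and the region server (port 42177) connecting to the mini ZooKeeper cluster at 127.0.0.1:55590 and creating znodes such as /hbase/master and /hbase/backup-masters/... under the /hbase base znode. A minimal sketch of inspecting that layout with the stock ZooKeeper client follows; the connect string and base znode are taken from the log, while the session timeout and the no-op watcher are assumptions and the class is illustrative only.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class InspectHBaseZnodes {
    public static void main(String[] args) throws Exception {
        // Connect string taken from the log above; the 30s session timeout is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55590", 30_000, event -> { /* ignore watcher events */ });
        try {
            // The master/region-server entries above create children such as
            // /hbase/master and /hbase/backup-masters under this base znode.
            List<String> children = zk.getChildren("/hbase", false);
            children.forEach(System.out::println);
        } finally {
            zk.close();
        }
    }
}
```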
2024-12-02T14:10:42,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:10:42,071 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,076 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/hbase.id] with ID: f6f7c096-89c4-4098-bc4a-d85ccab25799 2024-12-02T14:10:42,076 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/.tmp/hbase.id 2024-12-02T14:10:42,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:10:42,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:10:42,082 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/.tmp/hbase.id]:[hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/hbase.id] 2024-12-02T14:10:42,096 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:42,096 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T14:10:42,097 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-02T14:10:42,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,099 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:10:42,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:10:42,106 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:10:42,106 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T14:10:42,107 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:10:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:10:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:10:42,113 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store 2024-12-02T14:10:42,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:10:42,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:10:42,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:10:42,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:10:42,121 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:10:42,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:10:42,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:10:42,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:10:42,121 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
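The master:store descriptor logged above declares four column families (info, proc, rs, state), each with explicit VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE settings. A minimal sketch of expressing the 'info' family with the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API follows; the attribute values are copied from the log entry, and the surrounding class and method names are illustrative only, not code from this test run.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    // Rebuilds the 'info' family of the master:store descriptor using the values
    // printed in the log above; anything not shown in the log is an assumption.
    static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))     // table 'master:store'
            .setColumnFamily(info)
            .build();
    }
}
```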
2024-12-02T14:10:42,121 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148642121Disabling compacts and flushes for region at 1733148642121Disabling writes for close at 1733148642121Writing region close event to WAL at 1733148642121Closed at 1733148642121 2024-12-02T14:10:42,122 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/.initializing 2024-12-02T14:10:42,122 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,125 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C36523%2C1733148641978, suffix=, logDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978, archiveDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/oldWALs, maxLogs=10 2024-12-02T14:10:42,125 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 2024-12-02T14:10:42,132 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 2024-12-02T14:10:42,133 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44975:44975),(127.0.0.1/127.0.0.1:33571:33571)] 2024-12-02T14:10:42,136 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:10:42,136 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:10:42,136 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,136 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T14:10:42,142 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,143 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T14:10:42,143 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:10:42,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:10:42,145 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:10:42,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:10:42,147 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:10:42,148 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,148 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,149 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,150 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,150 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,150 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:10:42,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:10:42,152 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:10:42,154 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:10:42,154 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=699713, jitterRate=-0.11026977002620697}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:10:42,155 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733148642137Initializing all the Stores at 1733148642137Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148642137Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148642140 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148642140Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148642140Cleaning up temporary data from old regions at 1733148642150 (+10 ms)Region opened successfully at 1733148642155 (+5 ms) 2024-12-02T14:10:42,155 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:10:42,158 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b0dfe84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:10:42,159 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-02T14:10:42,159 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:10:42,159 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:10:42,160 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:10:42,160 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T14:10:42,160 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T14:10:42,161 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:10:42,163 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:10:42,164 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:10:42,164 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:10:42,165 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:10:42,165 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:10:42,166 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:10:42,166 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:10:42,167 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:10:42,168 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:10:42,169 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T14:10:42,170 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:10:42,171 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:10:42,172 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:10:42,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:10:42,173 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:10:42,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,173 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,174 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a3a61c9ba14f,36523,1733148641978, sessionid=0x1009b44e7190000, setting cluster-up flag (Was=false) 2024-12-02T14:10:42,175 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,178 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:10:42,179 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,181 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,184 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T14:10:42,185 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,186 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:10:42,191 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:10:42,191 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:10:42,191 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T14:10:42,191 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a3a61c9ba14f,36523,1733148641978 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:10:42,193 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:10:42,194 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:10:42,194 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:10:42,194 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:10:42,194 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a3a61c9ba14f:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:10:42,194 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,194 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:10:42,194 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,198 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1733148672198 2024-12-02T14:10:42,198 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:10:42,199 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:10:42,199 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:10:42,199 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:10:42,199 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:10:42,199 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:10:42,199 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:10:42,199 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:10:42,200 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,200 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:10:42,200 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:10:42,200 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:10:42,201 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:10:42,201 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:10:42,201 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,201 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:10:42,205 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148642201,5,FailOnTimeoutGroup] 2024-12-02T14:10:42,208 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148642205,5,FailOnTimeoutGroup] 2024-12-02T14:10:42,208 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,208 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:10:42,208 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,209 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-02T14:10:42,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:10:42,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:10:42,219 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:10:42,219 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40 2024-12-02T14:10:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:10:42,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:10:42,233 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:10:42,234 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:10:42,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:10:42,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:10:42,239 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:10:42,239 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:10:42,241 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:10:42,241 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:10:42,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:10:42,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,245 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:10:42,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740 2024-12-02T14:10:42,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740 2024-12-02T14:10:42,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:10:42,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:10:42,249 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T14:10:42,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:10:42,254 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:10:42,255 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845018, jitterRate=0.07449650764465332}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:10:42,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733148642233Initializing all the Stores at 1733148642234 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148642234Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148642234Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148642234Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148642234Cleaning up temporary data from old regions at 1733148642248 (+14 ms)Region opened successfully at 1733148642255 (+7 ms) 2024-12-02T14:10:42,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:10:42,256 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:10:42,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:10:42,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:10:42,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:10:42,256 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:10:42,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148642256Disabling compacts and flushes for region at 1733148642256Disabling writes for close at 1733148642256Writing region close 
event to WAL at 1733148642256Closed at 1733148642256 2024-12-02T14:10:42,258 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:10:42,258 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:10:42,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:10:42,258 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(746): ClusterId : f6f7c096-89c4-4098-bc4a-d85ccab25799 2024-12-02T14:10:42,258 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:10:42,260 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:10:42,261 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:10:42,261 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:10:42,263 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:10:42,263 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:10:42,264 DEBUG [RS:0;a3a61c9ba14f:42177 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e92928f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:10:42,280 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a3a61c9ba14f:42177 2024-12-02T14:10:42,281 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:10:42,281 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:10:42,281 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T14:10:42,282 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,36523,1733148641978 with port=42177, startcode=1733148642030 2024-12-02T14:10:42,282 DEBUG [RS:0;a3a61c9ba14f:42177 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:10:42,285 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50665, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:10:42,285 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36523 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,285 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36523 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,287 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40 2024-12-02T14:10:42,287 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46733 2024-12-02T14:10:42,287 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:10:42,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:10:42,289 DEBUG [RS:0;a3a61c9ba14f:42177 {}] zookeeper.ZKUtil(111): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,289 WARN [RS:0;a3a61c9ba14f:42177 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:10:42,289 INFO [RS:0;a3a61c9ba14f:42177 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:10:42,289 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,290 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,42177,1733148642030] 2024-12-02T14:10:42,293 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:10:42,296 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:10:42,297 INFO [RS:0;a3a61c9ba14f:42177 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:10:42,297 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T14:10:42,297 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:10:42,298 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:10:42,298 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,299 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,300 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,300 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:10:42,300 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:10:42,300 DEBUG [RS:0;a3a61c9ba14f:42177 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:10:42,302 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T14:10:42,303 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,303 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,303 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,303 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,303 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,42177,1733148642030-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:10:42,315 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:10:42,316 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,42177,1733148642030-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,316 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,316 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.Replication(171): a3a61c9ba14f,42177,1733148642030 started 2024-12-02T14:10:42,329 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,329 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,42177,1733148642030, RpcServer on a3a61c9ba14f/172.17.0.2:42177, sessionid=0x1009b44e7190001 2024-12-02T14:10:42,329 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:10:42,329 DEBUG [RS:0;a3a61c9ba14f:42177 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,329 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,42177,1733148642030' 2024-12-02T14:10:42,329 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:10:42,330 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:10:42,330 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:10:42,330 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:10:42,330 DEBUG [RS:0;a3a61c9ba14f:42177 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,330 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,42177,1733148642030' 2024-12-02T14:10:42,331 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:10:42,331 DEBUG 
[RS:0;a3a61c9ba14f:42177 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:10:42,331 DEBUG [RS:0;a3a61c9ba14f:42177 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:10:42,331 INFO [RS:0;a3a61c9ba14f:42177 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:10:42,331 INFO [RS:0;a3a61c9ba14f:42177 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:10:42,414 WARN [a3a61c9ba14f:36523 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:10:42,433 INFO [RS:0;a3a61c9ba14f:42177 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C42177%2C1733148642030, suffix=, logDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030, archiveDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/oldWALs, maxLogs=32 2024-12-02T14:10:42,434 INFO [RS:0;a3a61c9ba14f:42177 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:10:42,439 INFO [RS:0;a3a61c9ba14f:42177 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:10:42,440 DEBUG [RS:0;a3a61c9ba14f:42177 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33571:33571),(127.0.0.1/127.0.0.1:44975:44975)] 2024-12-02T14:10:42,664 DEBUG [a3a61c9ba14f:36523 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T14:10:42,665 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,666 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,42177,1733148642030, state=OPENING 2024-12-02T14:10:42,668 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:10:42,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,669 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:10:42,669 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:10:42,670 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:10:42,670 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:10:42,670 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,42177,1733148642030}] 2024-12-02T14:10:42,823 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:10:42,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58219, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:10:42,828 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:10:42,829 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:10:42,830 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C42177%2C1733148642030.meta, suffix=.meta, logDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030, archiveDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/oldWALs, maxLogs=32 2024-12-02T14:10:42,831 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta 2024-12-02T14:10:42,836 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta 2024-12-02T14:10:42,837 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44975:44975),(127.0.0.1/127.0.0.1:33571:33571)] 2024-12-02T14:10:42,842 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:10:42,842 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:10:42,842 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T14:10:42,842 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T14:10:42,842 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:10:42,843 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:10:42,843 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:10:42,843 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:10:42,845 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:10:42,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:10:42,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:10:42,848 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:10:42,848 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:10:42,849 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:10:42,849 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:10:42,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:10:42,850 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:10:42,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T14:10:42,851 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:10:42,852 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740 2024-12-02T14:10:42,853 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740 2024-12-02T14:10:42,855 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:10:42,855 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:10:42,855 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:10:42,856 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:10:42,857 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=775566, jitterRate=-0.013818055391311646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:10:42,857 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:10:42,858 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733148642843Writing region info on filesystem at 1733148642843Initializing all the Stores at 1733148642844 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148642844Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148642845 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148642845Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148642845Cleaning up temporary data from old regions at 1733148642855 (+10 ms)Running coprocessor post-open hooks at 1733148642857 (+2 ms)Region opened successfully at 1733148642857 2024-12-02T14:10:42,858 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733148642823 2024-12-02T14:10:42,860 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:10:42,860 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:10:42,861 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,862 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,42177,1733148642030, state=OPEN 2024-12-02T14:10:42,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:10:42,864 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:10:42,864 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:42,864 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:10:42,864 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:10:42,867 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:10:42,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,42177,1733148642030 in 194 msec 2024-12-02T14:10:42,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T14:10:42,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-12-02T14:10:42,871 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:10:42,871 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:10:42,873 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:10:42,873 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,42177,1733148642030, seqNum=-1] 2024-12-02T14:10:42,873 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:10:42,874 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48049, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:10:42,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 689 msec 2024-12-02T14:10:42,880 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733148642880, completionTime=-1 2024-12-02T14:10:42,880 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T14:10:42,880 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:10:42,881 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733148702881 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733148762882 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36523,1733148641978-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36523,1733148641978-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36523,1733148641978-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a3a61c9ba14f:36523, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,882 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:42,884 DEBUG [master/a3a61c9ba14f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.815sec 2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 
2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36523,1733148641978-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:10:42,886 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36523,1733148641978-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T14:10:42,889 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T14:10:42,889 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T14:10:42,889 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,36523,1733148641978-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:10:42,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:42,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63008d08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:10:42,959 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a3a61c9ba14f,36523,-1 for getting cluster id 2024-12-02T14:10:42,960 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:10:42,962 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f6f7c096-89c4-4098-bc4a-d85ccab25799' 2024-12-02T14:10:42,962 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:10:42,962 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f6f7c096-89c4-4098-bc4a-d85ccab25799" 2024-12-02T14:10:42,963 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167ec013, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:10:42,963 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a3a61c9ba14f,36523,-1] 2024-12-02T14:10:42,963 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:10:42,963 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:10:42,965 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59982, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:10:42,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b9f6f01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:10:42,967 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:10:42,969 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,42177,1733148642030, seqNum=-1] 2024-12-02T14:10:42,970 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:10:42,973 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56092, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:10:42,975 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,975 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:10:42,979 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T14:10:42,979 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-02T14:10:42,979 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-02T14:10:42,979 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T14:10:42,981 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is a3a61c9ba14f,36523,1733148641978 2024-12-02T14:10:42,981 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2190bf19 2024-12-02T14:10:42,981 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T14:10:42,982 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59986, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T14:10:42,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36523 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T14:10:42,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36523 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-02T14:10:42,983 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36523 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:10:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36523 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T14:10:42,986 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T14:10:42,986 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:42,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36523 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-02T14:10:42,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36523 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:10:42,987 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T14:10:42,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741835_1011 (size=395) 2024-12-02T14:10:42,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741835_1011 (size=395) 2024-12-02T14:10:42,995 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => af5c0bef14a0f761bb4269def746b379, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40 2024-12-02T14:10:43,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36889 is added to blk_1073741836_1012 (size=78) 2024-12-02T14:10:43,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41973 is added to blk_1073741836_1012 (size=78) 2024-12-02T14:10:43,002 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:10:43,002 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing af5c0bef14a0f761bb4269def746b379, disabling compactions & flushes 2024-12-02T14:10:43,002 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:43,002 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:43,002 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. after waiting 0 ms 2024-12-02T14:10:43,002 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:43,002 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:43,002 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for af5c0bef14a0f761bb4269def746b379: Waiting for close lock at 1733148643002Disabling compacts and flushes for region at 1733148643002Disabling writes for close at 1733148643002Writing region close event to WAL at 1733148643002Closed at 1733148643002 2024-12-02T14:10:43,004 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T14:10:43,004 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733148643004"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733148643004"}]},"ts":"1733148643004"} 2024-12-02T14:10:43,006 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T14:10:43,007 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T14:10:43,008 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148643008"}]},"ts":"1733148643008"} 2024-12-02T14:10:43,010 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-02T14:10:43,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=af5c0bef14a0f761bb4269def746b379, ASSIGN}] 2024-12-02T14:10:43,011 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=af5c0bef14a0f761bb4269def746b379, ASSIGN 2024-12-02T14:10:43,012 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=af5c0bef14a0f761bb4269def746b379, ASSIGN; state=OFFLINE, location=a3a61c9ba14f,42177,1733148642030; forceNewPlan=false, retain=false 2024-12-02T14:10:43,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:43,163 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=af5c0bef14a0f761bb4269def746b379, regionState=OPENING, regionLocation=a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:43,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=af5c0bef14a0f761bb4269def746b379, ASSIGN because future has completed 2024-12-02T14:10:43,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure af5c0bef14a0f761bb4269def746b379, server=a3a61c9ba14f,42177,1733148642030}] 2024-12-02T14:10:43,337 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:43,337 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => af5c0bef14a0f761bb4269def746b379, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:10:43,338 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,338 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:10:43,338 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,338 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,339 INFO [StoreOpener-af5c0bef14a0f761bb4269def746b379-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,341 INFO [StoreOpener-af5c0bef14a0f761bb4269def746b379-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 
EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af5c0bef14a0f761bb4269def746b379 columnFamilyName info 2024-12-02T14:10:43,341 DEBUG [StoreOpener-af5c0bef14a0f761bb4269def746b379-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:10:43,342 INFO [StoreOpener-af5c0bef14a0f761bb4269def746b379-1 {}] regionserver.HStore(327): Store=af5c0bef14a0f761bb4269def746b379/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:10:43,342 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,343 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,343 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,344 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,344 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,345 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,348 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:10:43,348 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened af5c0bef14a0f761bb4269def746b379; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845885, jitterRate=0.07559916377067566}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:10:43,348 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:10:43,349 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for af5c0bef14a0f761bb4269def746b379: Running coprocessor pre-open hook at 1733148643338Writing region info on filesystem at 1733148643338Initializing all the Stores at 1733148643339 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148643339Cleaning up temporary data from old regions at 1733148643344 (+5 ms)Running coprocessor post-open hooks at 1733148643348 (+4 ms)Region opened successfully at 1733148643349 (+1 ms) 2024-12-02T14:10:43,350 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379., pid=6, masterSystemTime=1733148643326 2024-12-02T14:10:43,353 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:43,353 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:43,354 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=af5c0bef14a0f761bb4269def746b379, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,42177,1733148642030 2024-12-02T14:10:43,355 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36523 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=a3a61c9ba14f,42177,1733148642030, table=TestLogRolling-testLogRollOnPipelineRestart, region=af5c0bef14a0f761bb4269def746b379. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-02T14:10:43,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure af5c0bef14a0f761bb4269def746b379, server=a3a61c9ba14f,42177,1733148642030 because future has completed 2024-12-02T14:10:43,360 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T14:10:43,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure af5c0bef14a0f761bb4269def746b379, server=a3a61c9ba14f,42177,1733148642030 in 188 msec 2024-12-02T14:10:43,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T14:10:43,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=af5c0bef14a0f761bb4269def746b379, ASSIGN in 350 msec 2024-12-02T14:10:43,364 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T14:10:43,365 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148643365"}]},"ts":"1733148643365"} 2024-12-02T14:10:43,367 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-02T14:10:43,368 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T14:10:43,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 385 msec 2024-12-02T14:10:43,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:43,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:10:44,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-02T14:10:44,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-02T14:10:44,724 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-02T14:10:44,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-02T14:10:44,724 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-12-02T14:10:44,725 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-02T14:10:44,725 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-02T14:10:44,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-02T14:10:44,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:45,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:45,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:45,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:46,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:46,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:46,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:47,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:47,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,861 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:47,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:47,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:48,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:48,367 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-02T14:10:48,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,390 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,390 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,390 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-02T14:10:48,397 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-02T14:10:48,398 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-12-02T14:10:48,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:48,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:49,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:49,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:49,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:50,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:50,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:50,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:51,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:51,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:51,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:10:52,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:10:52,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:10:52,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36523 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:10:53,008 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-02T14:10:53,008 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-02T14:10:53,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T14:10:53,011 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:10:53,014 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379., hostname=a3a61c9ba14f,42177,1733148642030, seqNum=2] 2024-12-02T14:10:53,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:53,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:53,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:54,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:54,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:54,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:55,016 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:10:55,017 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:55,017 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:55,017 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:55,018 WARN [DataStreamer for file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 block BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK], DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]) is bad. 2024-12-02T14:10:55,018 WARN [DataStreamer for file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta block BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK], DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]) is bad. 2024-12-02T14:10:55,018 WARN [DataStreamer for file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 block BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK], DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41973,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]) is bad. 2024-12-02T14:10:55,018 WARN [PacketResponder: BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41973] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,018 WARN [PacketResponder: BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41973] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,019 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_89774993_22 at /127.0.0.1:39304 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39304 dst: /127.0.0.1:36889 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,019 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:41874 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41874 dst: /127.0.0.1:41973 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,019 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_89774993_22 at /127.0.0.1:41864 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41864 dst: /127.0.0.1:41973 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:55,019 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:39336 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39336 dst: /127.0.0.1:36889 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,019 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:41886 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41973:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41886 dst: /127.0.0.1:41973 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,020 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:39324 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39324 dst: /127.0.0.1:36889 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:55,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fab6db5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:55,025 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43a454f0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:55,025 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:55,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28441b3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:55,025 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11b4bf4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:55,027 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:55,027 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T14:10:55,027 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2116755885-172.17.0.2-1733148641423 (Datanode Uuid ffbefef4-3524-41ba-bac2-c52fc664f9c6) service to localhost/127.0.0.1:46733 2024-12-02T14:10:55,027 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:55,028 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data3/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:55,028 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data4/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:55,028 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:55,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:55,043 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:55,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:55,044 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:55,045 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:10:55,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c23b3ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:55,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@487eadc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:55,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30b55012{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir/jetty-localhost-34109-hadoop-hdfs-3_4_1-tests_jar-_-any-16085849725039856953/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:55,134 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@371cd9e1{HTTP/1.1, (http/1.1)}{localhost:34109} 2024-12-02T14:10:55,134 INFO [Time-limited test {}] server.Server(415): Started @164796ms 2024-12-02T14:10:55,135 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:10:55,153 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:55,153 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:55,153 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:55,154 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:44818 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44818 dst: /127.0.0.1:36889 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:10:55,154 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_89774993_22 at /127.0.0.1:44814 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44814 dst: /127.0.0.1:36889 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,154 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:44816 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36889:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44816 dst: /127.0.0.1:36889 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:10:55,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60017892{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:55,156 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@fefca8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:10:55,156 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:10:55,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66c0323e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:10:55,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@354edf1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,STOPPED} 2024-12-02T14:10:55,158 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:10:55,158 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:10:55,158 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2116755885-172.17.0.2-1733148641423 (Datanode Uuid 8da22597-c7c2-4ba5-a5c1-5bbf45bdc016) service to localhost/127.0.0.1:46733 2024-12-02T14:10:55,158 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:10:55,158 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data1/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:55,159 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data2/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:10:55,159 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:10:55,165 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:10:55,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:55,168 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:10:55,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:10:55,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:10:55,169 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:10:55,169 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b68c165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:10:55,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b408bc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:10:55,202 WARN [Thread-1338 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:10:55,204 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e0805332b6b9690 with lease ID 0xb31d765e312bc58f: from storage DS-5001f710-acce-4575-9a21-435fcd93ab13 node DatanodeRegistration(127.0.0.1:35101, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=43757, infoSecurePort=0, ipcPort=34459, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:55,204 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e0805332b6b9690 with lease ID 0xb31d765e312bc58f: from storage DS-eb5d1eab-833f-471f-8592-87d275379606 node DatanodeRegistration(127.0.0.1:35101, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=43757, infoSecurePort=0, ipcPort=34459, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:55,259 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5282eca5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir/jetty-localhost-40621-hadoop-hdfs-3_4_1-tests_jar-_-any-224062036601255473/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:10:55,259 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@156f820b{HTTP/1.1, (http/1.1)}{localhost:40621} 2024-12-02T14:10:55,260 INFO [Time-limited test {}] server.Server(415): Started @164922ms 2024-12-02T14:10:55,261 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:10:55,344 WARN [Thread-1369 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:10:55,346 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa64200742c74928 with lease ID 0xb31d765e312bc590: from storage DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0 node DatanodeRegistration(127.0.0.1:41899, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=41287, infoSecurePort=0, ipcPort=37527, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:55,346 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa64200742c74928 with lease ID 0xb31d765e312bc590: from storage DS-8672793c-9f2f-4502-9648-a69adbdd0729 node DatanodeRegistration(127.0.0.1:41899, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=41287, infoSecurePort=0, ipcPort=37527, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:10:55,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:10:55,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:56,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:56,304 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-02T14:10:56,309 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-02T14:10:56,311 ERROR [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:56,311 WARN [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:56,311 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C42177%2C1733148642030:(num 1733148642434) roll requested 2024-12-02T14:10:56,312 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:10:56,318 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 newFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:10:56,318 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:56,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:56,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:56,318 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:56,318 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:10:56,319 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:10:56,319 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:10:56,319 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:10:56,319 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:10:56,319 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43757:43757),(127.0.0.1/127.0.0.1:41287:41287)] 2024-12-02T14:10:56,319 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 is not closed yet, will try archiving it next time 2024-12-02T14:10:56,320 WARN [IPC Server handler 0 on default port 46733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-12-02T14:10:56,320 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 after 1ms 2024-12-02T14:10:56,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:56,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:57,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:57,206 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-02T14:10:57,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:57,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:58,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:10:58,327 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-02T14:10:58,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:58,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:59,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:59,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:10:59,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:00,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:00,321 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 after 4001ms 2024-12-02T14:11:00,330 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:41899,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:00,330 WARN [DataStreamer for file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 block BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35101,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK], DatanodeInfoWithStorage[127.0.0.1:41899,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41899,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]) is bad. 2024-12-02T14:11:00,330 WARN [PacketResponder: BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41899] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:11:00,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:56788 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56788 dst: /127.0.0.1:35101 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:11:00,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:51726 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51726 dst: /127.0.0.1:41899 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:11:00,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5282eca5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:00,333 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@156f820b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:11:00,333 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:11:00,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b408bc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:11:00,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b68c165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,STOPPED} 2024-12-02T14:11:00,334 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:11:00,334 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:11:00,334 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:11:00,334 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2116755885-172.17.0.2-1733148641423 (Datanode Uuid 8da22597-c7c2-4ba5-a5c1-5bbf45bdc016) service to localhost/127.0.0.1:46733 2024-12-02T14:11:00,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data1/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:00,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data2/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:00,335 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:11:00,342 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:11:00,345 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:11:00,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:11:00,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:11:00,346 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:11:00,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d151a18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:11:00,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b944a8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:11:00,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27f5a072{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir/jetty-localhost-39413-hadoop-hdfs-3_4_1-tests_jar-_-any-9231142341749854417/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:00,439 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3a397072{HTTP/1.1, (http/1.1)}{localhost:39413} 2024-12-02T14:11:00,439 INFO [Time-limited test {}] server.Server(415): Started @170101ms 2024-12-02T14:11:00,440 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:11:00,455 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:00,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-883902368_22 at /127.0.0.1:54170 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54170 dst: /127.0.0.1:35101 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:11:00,458 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30b55012{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:00,458 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@371cd9e1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:11:00,459 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:11:00,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@487eadc3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:11:00,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c23b3ce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,STOPPED} 2024-12-02T14:11:00,460 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:11:00,460 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:11:00,460 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:11:00,460 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2116755885-172.17.0.2-1733148641423 (Datanode Uuid ffbefef4-3524-41ba-bac2-c52fc664f9c6) service to localhost/127.0.0.1:46733 2024-12-02T14:11:00,460 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data3/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:00,460 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data4/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:00,460 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:11:00,475 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:11:00,479 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:11:00,491 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:11:00,491 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:11:00,491 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:11:00,494 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2392cae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:11:00,495 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aaed393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:11:00,526 WARN [Thread-1412 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:11:00,528 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f581ef85320a7da with lease ID 0xb31d765e312bc591: from storage DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0 node DatanodeRegistration(127.0.0.1:38739, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=45333, infoSecurePort=0, ipcPort=42647, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:11:00,528 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f581ef85320a7da with lease ID 0xb31d765e312bc591: from storage DS-8672793c-9f2f-4502-9648-a69adbdd0729 node DatanodeRegistration(127.0.0.1:38739, datanodeUuid=8da22597-c7c2-4ba5-a5c1-5bbf45bdc016, infoPort=45333, infoSecurePort=0, ipcPort=42647, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:11:00,590 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a90fb45{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/java.io.tmpdir/jetty-localhost-42133-hadoop-hdfs-3_4_1-tests_jar-_-any-1607051429984037316/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:00,590 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1dad3af2{HTTP/1.1, (http/1.1)}{localhost:42133} 2024-12-02T14:11:00,590 INFO [Time-limited test {}] server.Server(415): Started @170253ms 2024-12-02T14:11:00,591 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:11:00,655 WARN [Thread-1443 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:11:00,657 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfeea0775a8d6a430 with lease ID 0xb31d765e312bc592: from storage DS-5001f710-acce-4575-9a21-435fcd93ab13 node DatanodeRegistration(127.0.0.1:40013, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=43563, infoSecurePort=0, ipcPort=38505, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T14:11:00,657 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfeea0775a8d6a430 with lease ID 0xb31d765e312bc592: from storage DS-eb5d1eab-833f-471f-8592-87d275379606 node DatanodeRegistration(127.0.0.1:40013, datanodeUuid=ffbefef4-3524-41ba-bac2-c52fc664f9c6, infoPort=43563, infoSecurePort=0, ipcPort=38505, storageInfo=lv=-57;cid=testClusterID;nsid=472209451;c=1733148641423), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:11:00,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:00,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:01,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:01,607 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-02T14:11:01,611 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-02T14:11:01,614 ERROR [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35101,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:01,615 WARN [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35101,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:11:01,615 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C42177%2C1733148642030:(num 1733148656311) roll requested 2024-12-02T14:11:01,615 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 2024-12-02T14:11:01,623 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 newFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 2024-12-02T14:11:01,623 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:01,624 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:01,624 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:01,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:01,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:01,624 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 2024-12-02T14:11:01,625 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35101,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:01,625 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35101,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:11:01,625 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:11:01,625 WARN [IPC Server handler 1 on default port 46733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-02T14:11:01,625 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45333:45333),(127.0.0.1/127.0.0.1:43563:43563)] 2024-12-02T14:11:01,626 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 is not closed yet, will try archiving it next time 2024-12-02T14:11:01,626 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 after 1ms 2024-12-02T14:11:01,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:01,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:02,173 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:02,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:02,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:03,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:03,627 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:03,636 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 newFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:03,637 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:03,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:03,637 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:03,637 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:03,637 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:03,638 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:03,639 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45333:45333),(127.0.0.1/127.0.0.1:43563:43563)] 2024-12-02T14:11:03,639 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 is not closed yet, will try archiving it next time 2024-12-02T14:11:03,639 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 is not closed yet, will try archiving it next time 2024-12-02T14:11:03,640 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:11:03,640 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:11:03,641 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 after 1ms 2024-12-02T14:11:03,641 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:11:03,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741838_1019 (size=1264) 2024-12-02T14:11:03,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741838_1019 (size=1264) 2024-12-02T14:11:03,642 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 is not closed yet, will try archiving it next time 2024-12-02T14:11:03,650 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733148643349/Put/vlen=218/seqid=0] 2024-12-02T14:11:03,650 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733148653015/Put/vlen=1045/seqid=0] 2024-12-02T14:11:03,651 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148642434 2024-12-02T14:11:03,651 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:11:03,651 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:11:03,651 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 after 0ms 2024-12-02T14:11:03,651 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:11:03,654 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733148656311/Put/vlen=1045/seqid=0] 2024-12-02T14:11:03,654 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733148658328/Put/vlen=1045/seqid=0] 2024-12-02T14:11:03,654 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 2024-12-02T14:11:03,654 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 2024-12-02T14:11:03,654 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 2024-12-02T14:11:03,654 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 after 0ms 2024-12-02T14:11:03,654 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148661615 2024-12-02T14:11:03,657 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733148661614/Put/vlen=1045/seqid=0] 2024-12-02T14:11:03,657 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:03,657 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:03,658 WARN [IPC Server handler 2 on default port 46733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 has not been closed. Lease recovery is in progress. 
RecoveryId = 1022 for block blk_1073741839_1021 2024-12-02T14:11:03,658 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 after 1ms 2024-12-02T14:11:03,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:03,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:04,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:04,661 WARN [ResponseProcessor for block BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:04,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_89774993_22 at /127.0.0.1:56654 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56654 dst: /127.0.0.1:38739 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38739 remote=/127.0.0.1:56654]. Total timeout mills is 60000, 58974 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:11:04,661 WARN [DataStreamer for file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 block BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38739,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK], DatanodeInfoWithStorage[127.0.0.1:40013,DS-5001f710-acce-4575-9a21-435fcd93ab13,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38739,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]) is bad. 2024-12-02T14:11:04,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_89774993_22 at /127.0.0.1:40138 [Receiving block BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40013:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40138 dst: /127.0.0.1:40013 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:11:04,665 WARN [DataStreamer for file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 block BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:11:04,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741839_1022 (size=85) 2024-12-02T14:11:04,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:04,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:05,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:05,627 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148656311 after 4002ms 2024-12-02T14:11:05,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:05,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:06,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:06,529 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T14:11:06,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:06,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:07,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:07,659 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 after 4002ms 2024-12-02T14:11:07,659 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:07,663 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:07,663 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing af5c0bef14a0f761bb4269def746b379 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-02T14:11:07,663 ERROR [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:11:07,664 WARN [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:11:07,664 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C42177%2C1733148642030:(num 1733148663627) roll requested 2024-12-02T14:11:07,664 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42177%2C1733148642030.1733148667664 2024-12-02T14:11:07,671 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 newFile=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148667664 2024-12-02T14:11:07,671 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:07,671 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:07,671 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:07,671 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:07,671 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:07,672 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148667664 2024-12-02T14:11:07,672 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:07,672 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2116755885-172.17.0.2-1733148641423:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:07,672 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:07,673 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 after 1ms 2024-12-02T14:11:07,673 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 to hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/oldWALs/a3a61c9ba14f%2C42177%2C1733148642030.1733148663627 2024-12-02T14:11:07,673 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43563:43563),(127.0.0.1/127.0.0.1:45333:45333)] 2024-12-02T14:11:07,690 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379/.tmp/info/3c112321db5740ffb951c7dea5841b5b is 1080, key is row1002/info:/1733148653015/Put/seqid=0 2024-12-02T14:11:07,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741841_1024 (size=9270) 2024-12-02T14:11:07,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741841_1024 (size=9270) 2024-12-02T14:11:07,699 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379/.tmp/info/3c112321db5740ffb951c7dea5841b5b 2024-12-02T14:11:07,706 DEBUG 
[Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379/.tmp/info/3c112321db5740ffb951c7dea5841b5b as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379/info/3c112321db5740ffb951c7dea5841b5b 2024-12-02T14:11:07,714 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379/info/3c112321db5740ffb951c7dea5841b5b, entries=4, sequenceid=8, filesize=9.1 K 2024-12-02T14:11:07,716 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for af5c0bef14a0f761bb4269def746b379 in 52ms, sequenceid=8, compaction requested=false 2024-12-02T14:11:07,716 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for af5c0bef14a0f761bb4269def746b379: 2024-12-02T14:11:07,716 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-02T14:11:07,716 ERROR [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:07,716 WARN [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40-prefix:a3a61c9ba14f,42177,1733148642030.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:11:07,717 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C42177%2C1733148642030.meta:.meta(num 1733148642831) roll requested
2024-12-02T14:11:07,717 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148667717.meta
2024-12-02T14:11:07,725 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:07,725 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:07,725 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:07,725 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:07,725 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:07,726 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148667717.meta
2024-12-02T14:11:07,728 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-02T14:11:07,728 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-02T14:11:07,728 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta 2024-12-02T14:11:07,728 WARN [IPC Server handler 3 on default port 46733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-12-02T14:11:07,729 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta after 1ms 2024-12-02T14:11:07,731 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43563:43563),(127.0.0.1/127.0.0.1:45333:45333)] 2024-12-02T14:11:07,731 DEBUG [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta is not closed yet, will try archiving it next time 2024-12-02T14:11:07,748 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/info/a0579f22a48b44baa69081f393ce8f35 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379./info:regioninfo/1733148643354/Put/seqid=0 2024-12-02T14:11:07,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741843_1027 (size=7125) 2024-12-02T14:11:07,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741843_1027 (size=7125) 2024-12-02T14:11:07,755 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/info/a0579f22a48b44baa69081f393ce8f35 2024-12-02T14:11:07,777 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/ns/e64e0ce4f6344c40be360b691329dc53 is 43, key is default/ns:d/1733148642874/Put/seqid=0 2024-12-02T14:11:07,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741844_1028 (size=5153) 2024-12-02T14:11:07,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741844_1028 (size=5153) 2024-12-02T14:11:07,790 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/ns/e64e0ce4f6344c40be360b691329dc53 2024-12-02T14:11:07,820 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/table/a147573115c04715a3d32b9273a35b50 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733148643365/Put/seqid=0 2024-12-02T14:11:07,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741845_1029 (size=5438) 2024-12-02T14:11:07,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741845_1029 (size=5438) 2024-12-02T14:11:07,826 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/table/a147573115c04715a3d32b9273a35b50 2024-12-02T14:11:07,832 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/info/a0579f22a48b44baa69081f393ce8f35 as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/info/a0579f22a48b44baa69081f393ce8f35 2024-12-02T14:11:07,838 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/info/a0579f22a48b44baa69081f393ce8f35, entries=10, sequenceid=11, filesize=7.0 K 2024-12-02T14:11:07,839 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/ns/e64e0ce4f6344c40be360b691329dc53 as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/ns/e64e0ce4f6344c40be360b691329dc53 2024-12-02T14:11:07,846 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/ns/e64e0ce4f6344c40be360b691329dc53, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T14:11:07,848 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/.tmp/table/a147573115c04715a3d32b9273a35b50 as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/table/a147573115c04715a3d32b9273a35b50 2024-12-02T14:11:07,855 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/table/a147573115c04715a3d32b9273a35b50, entries=2, sequenceid=11, filesize=5.3 K 2024-12-02T14:11:07,856 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 140ms, sequenceid=11, compaction requested=false 2024-12-02T14:11:07,856 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-02T14:11:07,861 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T14:11:07,861 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T14:11:07,861 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:11:07,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:11:07,861 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:11:07,861 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-02T14:11:07,861 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-02T14:11:07,861 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1451618077, stopped=false
2024-12-02T14:11:07,862 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a3a61c9ba14f,36523,1733148641978
2024-12-02T14:11:07,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-02T14:11:07,863 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-02T14:11:07,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T14:11:07,863 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-02T14:11:07,863 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-02T14:11:07,863 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-02T14:11:07,863 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:11:07,863 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:11:07,863 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:11:07,863 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:11:07,863 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,42177,1733148642030' ***** 2024-12-02T14:11:07,863 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T14:11:07,864 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(3091): Received CLOSE for af5c0bef14a0f761bb4269def746b379 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,42177,1733148642030 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a3a61c9ba14f:42177. 2024-12-02T14:11:07,864 DEBUG [RS:0;a3a61c9ba14f:42177 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:11:07,864 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing af5c0bef14a0f761bb4269def746b379, disabling compactions & flushes 2024-12-02T14:11:07,864 DEBUG [RS:0;a3a61c9ba14f:42177 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:11:07,864 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:11:07,864 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:11:07,864 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. after waiting 0 ms 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T14:11:07,864 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:11:07,864 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:11:07,865 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T14:11:07,865 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1325): Online Regions={af5c0bef14a0f761bb4269def746b379=TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T14:11:07,865 DEBUG [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, af5c0bef14a0f761bb4269def746b379 2024-12-02T14:11:07,865 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:11:07,865 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:11:07,865 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:11:07,865 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:11:07,865 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:11:07,868 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T14:11:07,868 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/data/default/TestLogRolling-testLogRollOnPipelineRestart/af5c0bef14a0f761bb4269def746b379/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-02T14:11:07,869 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:11:07,869 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:11:07,869 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for af5c0bef14a0f761bb4269def746b379: Waiting for close lock at 1733148667864Running coprocessor pre-close hooks at 1733148667864Disabling compacts and flushes for region at 1733148667864Disabling writes for close at 1733148667864Writing region close event to WAL at 1733148667865 (+1 ms)Running coprocessor post-close hooks at 1733148667869 (+4 ms)Closed at 1733148667869 2024-12-02T14:11:07,869 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:11:07,869 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148667865Running coprocessor pre-close hooks at 1733148667865Disabling compacts and flushes for region at 1733148667865Disabling writes for close at 1733148667865Writing region close event to WAL at 1733148667866 (+1 ms)Running coprocessor post-close hooks at 1733148667869 (+3 ms)Closed at 1733148667869 2024-12-02T14:11:07,869 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733148642983.af5c0bef14a0f761bb4269def746b379. 2024-12-02T14:11:07,869 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:11:07,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:07,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:08,065 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,42177,1733148642030; all regions closed. 2024-12-02T14:11:08,066 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:08,066 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:08,067 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:08,067 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:08,068 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:08,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741842_1025 (size=825) 2024-12-02T14:11:08,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741842_1025 (size=825) 2024-12-02T14:11:08,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:08,305 INFO [regionserver/a3a61c9ba14f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:11:08,356 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T14:11:08,356 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T14:11:08,658 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-02T14:11:08,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:08,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:09,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:09,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:09,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:10,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:10,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:10,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:11,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:11,729 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta after 4001ms 2024-12-02T14:11:11,730 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/WALs/a3a61c9ba14f,42177,1733148642030/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta to hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/oldWALs/a3a61c9ba14f%2C42177%2C1733148642030.meta.1733148642831.meta 2024-12-02T14:11:11,733 DEBUG [RS:0;a3a61c9ba14f:42177 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/oldWALs 2024-12-02T14:11:11,733 INFO [RS:0;a3a61c9ba14f:42177 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C42177%2C1733148642030.meta:.meta(num 1733148667717) 2024-12-02T14:11:11,733 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:11,733 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:11,734 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:11,734 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:11,734 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:11,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741840_1023 (size=1162) 2024-12-02T14:11:11,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741840_1023 (size=1162) 
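[Editor's note] The repeated "Failed invocation ... Filesystem closed" WARNs against port 33497 and the successful "Recovered lease, attempt=1 ... after 4001ms" INFO against port 46733 are both the WAL lease-recovery loop polling the NameNode until the old writer's file is closed. Below is a minimal, hypothetical sketch of such a retry loop using only public Hadoop calls (DistributedFileSystem.recoverLease and isFileClosed are real APIs; the class name, retry count, and pause are illustrative assumptions, not HBase's RecoverLeaseFSUtils implementation).

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Hypothetical helper; not the HBase RecoverLeaseFSUtils implementation.
public final class LeaseRecoverySketch {
  // Poll until the NameNode reports the WAL file closed, as the
  // "Recovered lease, attempt=N ... after NNNNms" entries above suggest.
  public static boolean recoverLease(DistributedFileSystem dfs, Path walFile,
      int maxAttempts, long pauseMs) throws IOException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // Ask the NameNode to start/advance lease recovery; true means the file is closed.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      // isFileClosed() is the call that throws "Filesystem closed" in the WARN
      // entries above once the underlying DFSClient has already been shut down.
      if (dfs.isFileClosed(walFile)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  private LeaseRecoverySketch() {}
}

The "Filesystem closed" cause in the stack traces means the retries can never succeed for that client instance; only the attempts made against the still-open filesystem (port 46733) recover the lease.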
2024-12-02T14:11:11,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:11,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:11,960 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T14:11:12,142 DEBUG [RS:0;a3a61c9ba14f:42177 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/oldWALs 2024-12-02T14:11:12,142 INFO [RS:0;a3a61c9ba14f:42177 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C42177%2C1733148642030:(num 1733148667664) 2024-12-02T14:11:12,142 DEBUG [RS:0;a3a61c9ba14f:42177 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:11:12,142 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:11:12,143 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:11:12,143 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:11:12,143 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:11:12,143 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
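[Editor's note] The "Archiving ... to .../oldWALs/..." and "Moved 4 WAL file(s) to .../oldWALs" entries describe retiring finished WAL files into the oldWALs directory. As a hedged illustration of that step only (HBase's AbstractFSWAL does additional bookkeeping), the move itself can be expressed with the standard FileSystem API; the class and method names below are assumptions for illustration.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only; not the AbstractFSWAL archiving code.
public final class WalArchiveSketch {
  // Move a finished WAL under the oldWALs directory, mirroring the
  // "Archiving ... to .../oldWALs/..." entries in the log above.
  public static Path archive(FileSystem fs, Path walFile, Path oldWalsDir) throws IOException {
    Path target = new Path(oldWalsDir, walFile.getName());
    if (!fs.rename(walFile, target)) {
      throw new IOException("Failed to archive " + walFile + " to " + target);
    }
    return target;
  }

  private WalArchiveSketch() {}
}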
2024-12-02T14:11:12,143 INFO [RS:0;a3a61c9ba14f:42177 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42177 2024-12-02T14:11:12,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:11:12,145 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,42177,1733148642030 2024-12-02T14:11:12,145 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:11:12,147 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,42177,1733148642030] 2024-12-02T14:11:12,148 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,42177,1733148642030 already deleted, retry=false 2024-12-02T14:11:12,148 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,42177,1733148642030 expired; onlineServers=0 2024-12-02T14:11:12,148 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a3a61c9ba14f,36523,1733148641978' ***** 2024-12-02T14:11:12,148 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T14:11:12,149 INFO [M:0;a3a61c9ba14f:36523 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:11:12,149 INFO [M:0;a3a61c9ba14f:36523 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:11:12,149 DEBUG [M:0;a3a61c9ba14f:36523 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T14:11:12,149 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
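[Editor's note] The ZooKeeper events above ("NodeDeleted ... path=/hbase/rs/a3a61c9ba14f,42177,1733148642030" followed by "RegionServer ephemeral node deleted, processing expiration") show the master learning of the region server's departure through the deletion of its ephemeral znode. A minimal sketch of that underlying mechanism with the raw ZooKeeper client is below; HBase actually wraps this in ZKWatcher and RegionServerTracker, so the class and method names here are illustrative assumptions.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Minimal sketch of the underlying mechanism; not HBase's tracker code.
public final class RsNodeWatchSketch {
  public static void watchRegionServer(ZooKeeper zk, String rsZnode) throws Exception {
    // Register a watch on the region server's ephemeral znode (e.g.
    // /hbase/rs/a3a61c9ba14f,42177,1733148642030). When the server's session
    // ends, ZooKeeper deletes the node and fires NodeDeleted, which is the
    // event the "processing expiration" entry above reacts to.
    zk.exists(rsZnode, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted) {
          System.out.println("Region server znode deleted: " + event.getPath());
        }
      }
    });
  }

  private RsNodeWatchSketch() {}
}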
2024-12-02T14:11:12,149 DEBUG [M:0;a3a61c9ba14f:36523 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:11:12,149 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148642205 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148642205,5,FailOnTimeoutGroup] 2024-12-02T14:11:12,149 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148642201 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148642201,5,FailOnTimeoutGroup] 2024-12-02T14:11:12,149 INFO [M:0;a3a61c9ba14f:36523 {}] hbase.ChoreService(370): Chore service for: master/a3a61c9ba14f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:11:12,149 INFO [M:0;a3a61c9ba14f:36523 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:11:12,149 DEBUG [M:0;a3a61c9ba14f:36523 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:11:12,149 INFO [M:0;a3a61c9ba14f:36523 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:11:12,149 INFO [M:0;a3a61c9ba14f:36523 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:11:12,150 INFO [M:0;a3a61c9ba14f:36523 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:11:12,150 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T14:11:12,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:11:12,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:12,150 DEBUG [M:0;a3a61c9ba14f:36523 {}] zookeeper.ZKUtil(347): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:11:12,150 WARN [M:0;a3a61c9ba14f:36523 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:11:12,150 INFO [M:0;a3a61c9ba14f:36523 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/.lastflushedseqids 2024-12-02T14:11:12,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741846_1030 (size=130) 2024-12-02T14:11:12,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741846_1030 (size=130) 2024-12-02T14:11:12,156 INFO [M:0;a3a61c9ba14f:36523 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:11:12,156 INFO [M:0;a3a61c9ba14f:36523 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:11:12,156 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:11:12,156 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:11:12,156 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:11:12,156 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:11:12,156 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:11:12,156 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-12-02T14:11:12,157 ERROR [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData-prefix:a3a61c9ba14f,36523,1733148641978 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:12,157 WARN [FSHLog-0-hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData-prefix:a3a61c9ba14f,36523,1733148641978 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
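[Editor's note] The "All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,...]] are bad. Aborting..." errors above come from the DFS write pipeline having no surviving datanode to fail over to, which is common on a mini cluster with one or two datanodes. As a hedged aside (not something this test necessarily configures), the client-side pipeline-replacement behavior is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the sketch below shows how a client Configuration might relax them for a very small cluster. The chosen values, and whether to change them at all, are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;

// Illustrative client-side settings only; values are assumptions.
public final class PipelineFailureConfigSketch {
  public static Configuration smallClusterClientConf() {
    Configuration conf = new Configuration();
    // With very few datanodes there may be no replacement node available, so
    // the default replace-on-failure policy can abort writes with
    // "All datanodes ... are bad", as seen in the log above.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    return conf;
  }

  private PipelineFailureConfigSketch() {}
}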
2024-12-02T14:11:12,157 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog a3a61c9ba14f%2C36523%2C1733148641978:(num 1733148642125) roll requested 2024-12-02T14:11:12,157 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C36523%2C1733148641978.1733148672157 2024-12-02T14:11:12,161 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,161 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,162 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,162 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,162 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,162 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148672157 2024-12-02T14:11:12,162 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-02T14:11:12,162 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36889,DS-779d0608-4bb8-47b9-8d7d-9234cb3512b0,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-02T14:11:12,162 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 2024-12-02T14:11:12,162 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43563:43563),(127.0.0.1/127.0.0.1:45333:45333)] 2024-12-02T14:11:12,163 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 is not closed yet, will try archiving it next time 2024-12-02T14:11:12,163 WARN [IPC Server handler 1 on default port 46733 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-12-02T14:11:12,163 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 after 1ms 2024-12-02T14:11:12,175 DEBUG [M:0;a3a61c9ba14f:36523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10020a88d5ca445fa8badff4e574a487 is 82, key is hbase:meta,,1/info:regioninfo/1733148642861/Put/seqid=0 2024-12-02T14:11:12,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741848_1033 (size=5672) 2024-12-02T14:11:12,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741848_1033 (size=5672) 2024-12-02T14:11:12,180 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10020a88d5ca445fa8badff4e574a487 2024-12-02T14:11:12,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:12,199 DEBUG [M:0;a3a61c9ba14f:36523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aa14effc3cf742cfa322856d04609c82 is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733148643369/Put/seqid=0 2024-12-02T14:11:12,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741849_1034 (size=6117) 2024-12-02T14:11:12,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741849_1034 (size=6117) 2024-12-02T14:11:12,204 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aa14effc3cf742cfa322856d04609c82 2024-12-02T14:11:12,224 DEBUG [M:0;a3a61c9ba14f:36523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f8479cb70654b73b913cbce9c83aa80 is 69, key is a3a61c9ba14f,42177,1733148642030/rs:state/1733148642286/Put/seqid=0 2024-12-02T14:11:12,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741850_1035 (size=5156) 2024-12-02T14:11:12,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741850_1035 (size=5156) 
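[Editor's note] The flush entries above write each column family's data to a file under the region's .tmp directory (e.g. .../.tmp/info/10020a88d5ca445fa8badff4e574a487), and the "Committing ... as ..." / "Added ..." entries that follow move those files into the family directories. A minimal sketch of that commit step is below, under stated assumptions: the class and method names are illustrative, and HBase's HRegionFileSystem performs additional validation beyond a bare rename.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative commit step only; not HBase's HRegionFileSystem implementation.
public final class CommitStoreFileSketch {
  // Move a flushed file from the region's .tmp area into its column family
  // directory, mirroring "Committing .../.tmp/info/... as .../info/..." above.
  public static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir)
      throws IOException {
    Path dst = new Path(familyDir, tmpFile.getName());
    // A single rename keeps readers from ever seeing a partially written store file.
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }

  private CommitStoreFileSketch() {}
}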
2024-12-02T14:11:12,229 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f8479cb70654b73b913cbce9c83aa80 2024-12-02T14:11:12,247 DEBUG [M:0;a3a61c9ba14f:36523 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5c856da049f44ee0a838188a6fd955fb is 52, key is load_balancer_on/state:d/1733148642977/Put/seqid=0 2024-12-02T14:11:12,247 INFO [RS:0;a3a61c9ba14f:42177 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:11:12,247 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:11:12,247 INFO [RS:0;a3a61c9ba14f:42177 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,42177,1733148642030; zookeeper connection closed. 2024-12-02T14:11:12,247 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42177-0x1009b44e7190001, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:11:12,247 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@15342b96 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@15342b96 2024-12-02T14:11:12,247 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T14:11:12,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741851_1036 (size=5056) 2024-12-02T14:11:12,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741851_1036 (size=5056) 2024-12-02T14:11:12,251 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5c856da049f44ee0a838188a6fd955fb 2024-12-02T14:11:12,256 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10020a88d5ca445fa8badff4e574a487 as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10020a88d5ca445fa8badff4e574a487 2024-12-02T14:11:12,261 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10020a88d5ca445fa8badff4e574a487, entries=8, sequenceid=56, filesize=5.5 K 2024-12-02T14:11:12,262 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/aa14effc3cf742cfa322856d04609c82 as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aa14effc3cf742cfa322856d04609c82 2024-12-02T14:11:12,268 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/aa14effc3cf742cfa322856d04609c82, entries=6, sequenceid=56, filesize=6.0 K 2024-12-02T14:11:12,269 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3f8479cb70654b73b913cbce9c83aa80 as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3f8479cb70654b73b913cbce9c83aa80 2024-12-02T14:11:12,274 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3f8479cb70654b73b913cbce9c83aa80, entries=1, sequenceid=56, filesize=5.0 K 2024-12-02T14:11:12,275 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5c856da049f44ee0a838188a6fd955fb as hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5c856da049f44ee0a838188a6fd955fb 2024-12-02T14:11:12,280 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5c856da049f44ee0a838188a6fd955fb, entries=1, sequenceid=56, filesize=4.9 K 2024-12-02T14:11:12,281 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false 2024-12-02T14:11:12,283 INFO [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:11:12,283 DEBUG [M:0;a3a61c9ba14f:36523 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148672156Disabling compacts and flushes for region at 1733148672156Disabling writes for close at 1733148672156Obtaining lock to block concurrent updates at 1733148672156Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733148672156Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1733148672157 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733148672163 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733148672163Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733148672175 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733148672175Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733148672184 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733148672199 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733148672199Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733148672208 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733148672223 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733148672223Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733148672234 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733148672246 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733148672247 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26f99c7d: reopening flushed file at 1733148672255 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b76139a: reopening flushed file at 1733148672261 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f4e4b7e: reopening flushed file at 1733148672268 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7837b266: reopening flushed file at 1733148672275 (+7 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=56, compaction requested=false at 1733148672281 (+6 ms)Writing region close event to WAL at 1733148672283 (+2 ms)Closed at 1733148672283 2024-12-02T14:11:12,283 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,283 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,283 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,283 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,283 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:11:12,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38739 is added to blk_1073741847_1031 (size=757) 2024-12-02T14:11:12,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40013 is added to blk_1073741847_1031 (size=757) 2024-12-02T14:11:12,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:12,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:12,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:13,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:13,392 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:11:13,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,393 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:13,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:13,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:14,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:14,658 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
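The recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from the WAL close path repeatedly polling the NameNode for lease recovery after the test has already shut its DFSClient down. The sketch below illustrates that polling pattern in simplified form; it is not the actual RecoverLeaseFSUtils code, and the retry count and sleep interval are invented for the example.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Simplified sketch of lease-recovery polling for a WAL file on HDFS.
public class LeaseRecoverySketch {
    public static boolean recover(String walUri, Configuration conf) throws Exception {
        Path wal = new Path(walUri);
        FileSystem fs = FileSystem.get(new URI(walUri), conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return true; // nothing to recover on a non-HDFS filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 0; attempt < 10; attempt++) {
            try {
                // Ask the NameNode to recover the lease, then check whether
                // the file has been closed out under the new lease.
                if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
                    return true;
                }
            } catch (IOException e) {
                // "Filesystem closed" lands here when the DFSClient was
                // already shut down, as in the warnings logged above.
            }
            Thread.sleep(1000L); // back off before the next attempt
        }
        return false;
    }
}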
2024-12-02T14:11:14,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:11:14,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T14:11:14,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T14:11:14,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-02T14:11:14,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:14,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:15,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:15,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:15,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:16,164 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 after 4002ms 2024-12-02T14:11:16,169 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/WALs/a3a61c9ba14f,36523,1733148641978/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 to hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/oldWALs/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 2024-12-02T14:11:16,178 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/MasterData/oldWALs/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125 to hdfs://localhost:46733/user/jenkins/test-data/c7fb81de-5f71-608a-ac05-a0c069837b40/oldWALs/a3a61c9ba14f%2C36523%2C1733148641978.1733148642125$masterlocalwal$ 2024-12-02T14:11:16,178 INFO [M:0;a3a61c9ba14f:36523 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T14:11:16,178 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
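The two INFO lines just above show a recovered master WAL being renamed out of the active WALs directory into oldWALs. Below is a minimal sketch of that archiving step using only the generic Hadoop FileSystem API; it is an illustration of the move, not the AbstractFSWAL or MasterRegionUtils implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: archive a finished WAL by renaming it into an oldWALs directory.
public class WalArchiveSketch {
    public static void archive(FileSystem fs, Path wal, Path oldWalsDir) throws IOException {
        if (!fs.exists(oldWalsDir)) {
            fs.mkdirs(oldWalsDir); // make sure the archive directory exists
        }
        Path target = new Path(oldWalsDir, wal.getName());
        if (!fs.rename(wal, target)) {
            throw new IOException("Failed to archive " + wal + " to " + target);
        }
    }
}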
2024-12-02T14:11:16,178 INFO [M:0;a3a61c9ba14f:36523 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36523 2024-12-02T14:11:16,178 INFO [M:0;a3a61c9ba14f:36523 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:11:16,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:16,279 INFO [M:0;a3a61c9ba14f:36523 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:11:16,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:11:16,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36523-0x1009b44e7190000, quorum=127.0.0.1:55590, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:11:16,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a90fb45{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:16,282 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1dad3af2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:11:16,283 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:11:16,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aaed393{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:11:16,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2392cae{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,STOPPED} 2024-12-02T14:11:16,284 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:11:16,285 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2116755885-172.17.0.2-1733148641423 (Datanode Uuid ffbefef4-3524-41ba-bac2-c52fc664f9c6) service to localhost/127.0.0.1:46733 2024-12-02T14:11:16,285 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data3/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:16,285 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data4/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:16,285 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:11:16,286 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:11:16,286 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:11:16,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27f5a072{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:16,294 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3a397072{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:11:16,294 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:11:16,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b944a8d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:11:16,294 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d151a18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,STOPPED} 2024-12-02T14:11:16,296 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:11:16,296 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
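The DataNode teardown records here are the other half of the earlier FsDatasetImpl warnings: the HBase-Metrics2-1 thread keeps sampling the dataset while shutdown nulls the state it reads, hence "Cannot invoke "java.util.Map.values()" because "this.executors" is null". The following is a hypothetical null-guard sketch of that race; the field and method names are taken only from the log message and are not Hadoop's actual code.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;

// Hypothetical sketch: a metrics read that tolerates a field nulled by shutdown.
public class MetricsRaceSketch {
    private volatile Map<String, ExecutorService> executors = new ConcurrentHashMap<>();

    /** Called by a metrics thread; may race with shutdown(). */
    public int activeExecutorCount() {
        Map<String, ExecutorService> snapshot = executors; // single volatile read
        return snapshot == null ? 0 : snapshot.values().size();
    }

    /** Called during shutdown; clears the field the metrics thread reads. */
    public void shutdown() {
        executors = null;
    }

    public static void main(String[] args) {
        MetricsRaceSketch sketch = new MetricsRaceSketch();
        System.out.println(sketch.activeExecutorCount()); // 0
        sketch.shutdown();
        System.out.println(sketch.activeExecutorCount()); // still 0, no NPE
    }
}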
2024-12-02T14:11:16,296 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:11:16,296 WARN [BP-2116755885-172.17.0.2-1733148641423 heartbeating to localhost/127.0.0.1:46733 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2116755885-172.17.0.2-1733148641423 (Datanode Uuid 8da22597-c7c2-4ba5-a5c1-5bbf45bdc016) service to localhost/127.0.0.1:46733 2024-12-02T14:11:16,297 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data1/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:16,297 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/cluster_98d551dc-3b31-f42d-21e9-29223dec6c4a/data/data2/current/BP-2116755885-172.17.0.2-1733148641423 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:11:16,297 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:11:16,302 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53a4c428{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:11:16,303 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@347a2271{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:11:16,303 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:11:16,303 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@421a8f73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:11:16,303 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a3a743f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir/,STOPPED} 2024-12-02T14:11:16,310 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T14:11:16,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T14:11:16,344 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46733 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46733 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46733 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46733 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46733 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46733 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46733 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46733 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 440) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=96 (was 44) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6250 (was 6169) - AvailableMemoryMB LEAK? 
- 2024-12-02T14:11:16,353 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=96, ProcessCount=11, AvailableMemoryMB=6249 2024-12-02T14:11:16,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T14:11:16,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.log.dir so I do NOT create it in target/test-data/49528bd8-f368-c99c-0298-078e8c125494 2024-12-02T14:11:16,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02c92d7d-2ef6-53f3-240e-1d88ece15293/hadoop.tmp.dir so I do NOT create it in target/test-data/49528bd8-f368-c99c-0298-078e8c125494 2024-12-02T14:11:16,353 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091, deleteOnExit=true 2024-12-02T14:11:16,353 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/test.cache.data in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T14:11:16,354 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T14:11:16,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:11:16,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:11:16,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T14:11:16,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/nfs.dump.dir in system properties and HBase conf 2024-12-02T14:11:16,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/java.io.tmpdir in system properties and HBase conf 2024-12-02T14:11:16,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:11:16,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T14:11:16,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T14:11:16,368 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:11:16,425 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:11:16,429 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:11:16,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:11:16,430 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:11:16,430 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:11:16,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:11:16,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@252e2abb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:11:16,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@521d1c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:11:16,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12208e1b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/java.io.tmpdir/jetty-localhost-39229-hadoop-hdfs-3_4_1-tests_jar-_-any-1937129011540944685/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:11:16,532 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56e526c{HTTP/1.1, (http/1.1)}{localhost:39229} 2024-12-02T14:11:16,532 INFO [Time-limited test {}] server.Server(415): Started @186195ms 2024-12-02T14:11:16,543 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:11:16,600 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:11:16,604 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:11:16,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:11:16,605 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:11:16,605 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:11:16,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48bfafbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:11:16,606 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45ae7776{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:11:16,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ac76b28{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/java.io.tmpdir/jetty-localhost-36711-hadoop-hdfs-3_4_1-tests_jar-_-any-10928312203620248476/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:16,709 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@371e2711{HTTP/1.1, (http/1.1)}{localhost:36711} 2024-12-02T14:11:16,709 INFO [Time-limited test {}] server.Server(415): Started @186372ms 2024-12-02T14:11:16,710 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:11:16,762 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:11:16,770 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:11:16,775 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:11:16,775 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:11:16,775 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:11:16,776 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fc981fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:11:16,776 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@739c2ff2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:11:16,794 WARN [Thread-1637 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data1/current/BP-774725059-172.17.0.2-1733148676379/current, will proceed with Du for space computation calculation, 2024-12-02T14:11:16,795 WARN [Thread-1638 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data2/current/BP-774725059-172.17.0.2-1733148676379/current, will proceed with Du for space computation calculation, 2024-12-02T14:11:16,821 WARN [Thread-1616 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:11:16,826 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7265eda95ec131e3 with lease ID 0xa253ba06c793012f: Processing first storage report for DS-98f4723c-8900-4960-8287-492cb021285d from datanode DatanodeRegistration(127.0.0.1:32821, datanodeUuid=8d0d4d90-c7a5-4e72-9d71-e8e3230ac31c, infoPort=42965, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379) 2024-12-02T14:11:16,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7265eda95ec131e3 with lease ID 0xa253ba06c793012f: from storage DS-98f4723c-8900-4960-8287-492cb021285d node DatanodeRegistration(127.0.0.1:32821, datanodeUuid=8d0d4d90-c7a5-4e72-9d71-e8e3230ac31c, infoPort=42965, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:11:16,827 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7265eda95ec131e3 with lease ID 0xa253ba06c793012f: Processing first storage report for DS-b8e9c684-193c-467b-a748-6daf2f088185 from datanode DatanodeRegistration(127.0.0.1:32821, datanodeUuid=8d0d4d90-c7a5-4e72-9d71-e8e3230ac31c, infoPort=42965, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379) 2024-12-02T14:11:16,827 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7265eda95ec131e3 with lease ID 0xa253ba06c793012f: from storage DS-b8e9c684-193c-467b-a748-6daf2f088185 node DatanodeRegistration(127.0.0.1:32821, datanodeUuid=8d0d4d90-c7a5-4e72-9d71-e8e3230ac31c, infoPort=42965, infoSecurePort=0, ipcPort=41363, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:11:16,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b8edabe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/java.io.tmpdir/jetty-localhost-42431-hadoop-hdfs-3_4_1-tests_jar-_-any-17075146679223597481/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:11:16,895 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3838d9cb{HTTP/1.1, (http/1.1)}{localhost:42431} 2024-12-02T14:11:16,895 INFO [Time-limited test {}] server.Server(415): Started @186558ms 2024-12-02T14:11:16,897 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:11:16,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:16,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:16,974 WARN [Thread-1663 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data3/current/BP-774725059-172.17.0.2-1733148676379/current, will proceed with Du for space computation calculation, 2024-12-02T14:11:16,976 WARN [Thread-1664 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data4/current/BP-774725059-172.17.0.2-1733148676379/current, will proceed with Du for space computation calculation, 2024-12-02T14:11:17,000 WARN [Thread-1652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:11:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b813e1af73ab35b with lease ID 0xa253ba06c7930130: Processing first storage report for DS-6f93635b-ba4b-46b5-8fce-23d7cfab5dda from datanode DatanodeRegistration(127.0.0.1:34479, datanodeUuid=0dba603a-0b0e-4ee3-b60f-168b8987b941, infoPort=39319, infoSecurePort=0, ipcPort=41581, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379) 2024-12-02T14:11:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b813e1af73ab35b with lease ID 0xa253ba06c7930130: from storage DS-6f93635b-ba4b-46b5-8fce-23d7cfab5dda node DatanodeRegistration(127.0.0.1:34479, datanodeUuid=0dba603a-0b0e-4ee3-b60f-168b8987b941, infoPort=39319, infoSecurePort=0, ipcPort=41581, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T14:11:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1b813e1af73ab35b with lease ID 0xa253ba06c7930130: Processing first storage report for DS-5aea8318-816b-440b-8f24-388634eb82f0 from datanode DatanodeRegistration(127.0.0.1:34479, datanodeUuid=0dba603a-0b0e-4ee3-b60f-168b8987b941, infoPort=39319, infoSecurePort=0, ipcPort=41581, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379) 2024-12-02T14:11:17,003 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1b813e1af73ab35b with lease ID 0xa253ba06c7930130: from storage DS-5aea8318-816b-440b-8f24-388634eb82f0 node DatanodeRegistration(127.0.0.1:34479, datanodeUuid=0dba603a-0b0e-4ee3-b60f-168b8987b941, infoPort=39319, infoSecurePort=0, ipcPort=41581, storageInfo=lv=-57;cid=testClusterID;nsid=714356700;c=1733148676379), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:11:17,026 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494 2024-12-02T14:11:17,033 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/zookeeper_0, clientPort=56298, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T14:11:17,037 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56298 2024-12-02T14:11:17,038 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:17,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:17,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:11:17,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:11:17,061 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1 with version=8 2024-12-02T14:11:17,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase-staging 2024-12-02T14:11:17,063 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:11:17,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:11:17,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:11:17,063 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:11:17,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:11:17,063 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:11:17,064 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T14:11:17,064 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:11:17,065 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43047 2024-12-02T14:11:17,067 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43047 connecting to ZooKeeper ensemble=127.0.0.1:56298 2024-12-02T14:11:17,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:430470x0, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:11:17,084 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43047-0x1009b4570130000 connected 2024-12-02T14:11:17,127 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:17,130 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:17,134 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:11:17,134 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1, hbase.cluster.distributed=false 2024-12-02T14:11:17,136 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:11:17,137 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43047 2024-12-02T14:11:17,137 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43047 2024-12-02T14:11:17,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43047 2024-12-02T14:11:17,147 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43047 2024-12-02T14:11:17,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43047 2024-12-02T14:11:17,171 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:11:17,171 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:11:17,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:11:17,172 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:11:17,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:11:17,172 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:11:17,172 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:11:17,172 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:11:17,173 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42993 2024-12-02T14:11:17,175 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42993 connecting to ZooKeeper ensemble=127.0.0.1:56298 2024-12-02T14:11:17,176 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:17,178 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:17,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429930x0, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:11:17,183 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:429930x0, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:11:17,184 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:11:17,184 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42993-0x1009b4570130001 connected 2024-12-02T14:11:17,184 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:11:17,185 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:11:17,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:17,186 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:11:17,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42993 2024-12-02T14:11:17,192 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42993 2024-12-02T14:11:17,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42993 2024-12-02T14:11:17,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42993 2024-12-02T14:11:17,200 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42993 2024-12-02T14:11:17,212 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a3a61c9ba14f:43047 2024-12-02T14:11:17,213 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:17,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:11:17,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:11:17,214 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:17,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:11:17,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,216 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:11:17,216 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a3a61c9ba14f,43047,1733148677063 from backup master directory 2024-12-02T14:11:17,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:17,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:11:17,217 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
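The repeated Close-WAL-Writer-0 warnings above (util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/...) all have the same shape: a java.lang.reflect.InvocationTargetException whose cause is java.io.IOException: Filesystem closed. RecoverLeaseFSUtils reaches the isFileClosed check through reflection, and Method.invoke always wraps whatever the target method throws, which is why the real IOException only shows up under "Caused by:". A minimal standalone sketch of that wrapping behaviour, using a hypothetical stand-in class rather than the actual HDFS client:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class InvocationWrappingDemo {
  // Stand-in for a client whose underlying filesystem has already been closed.
  static class ClosedClient {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    ClosedClient client = new ClosedClient();
    Method m = ClosedClient.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(client, "/some/wal/file");
    } catch (InvocationTargetException e) {
      // Method.invoke never rethrows the target's exception directly; it wraps it,
      // so the log prints the wrapper first and the IOException only as the cause.
      System.out.println("wrapper: " + e);
      System.out.println("cause  : " + e.getCause());
    }
  }
}

The paths in those warnings point at the WALs of servers started around 1733148593xxx on localhost:33497, i.e. the previous minicluster, so this looks consistent with leftover Close-WAL-Writer threads finishing after that cluster's DFS client was closed rather than with a failure in the test currently starting up.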
2024-12-02T14:11:17,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:11:17,217 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:17,227 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/hbase.id] with ID: 3e5932f4-5a63-4b18-aa21-10e523283def 2024-12-02T14:11:17,227 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/.tmp/hbase.id 2024-12-02T14:11:17,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:11:17,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:11:17,244 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/.tmp/hbase.id]:[hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/hbase.id] 2024-12-02T14:11:17,261 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:17,261 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T14:11:17,263 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
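The 14:11:17,227-17,244 entries above show the new master writing its cluster ID (3e5932f4-5a63-4b18-aa21-10e523283def) to .tmp/hbase.id and then moving it to hbase.id: write to a temporary file first, then rename, so a reader never observes a partially written ID file. A rough sketch of that pattern with the Hadoop FileSystem API (the real file stores a serialized ClusterId rather than the raw string; the root path below is simply the rootdir printed in this run):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1");
    FileSystem fs = root.getFileSystem(conf);

    Path tmp = new Path(root, ".tmp/hbase.id");
    Path dst = new Path(root, "hbase.id");

    // 1. Write the ID to the temporary location first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("3e5932f4-5a63-4b18-aa21-10e523283def".getBytes(StandardCharsets.UTF_8));
    }
    // 2. ... then move it into place so hbase.id appears atomically.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}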
2024-12-02T14:11:17,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:11:17,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:11:17,280 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:11:17,281 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T14:11:17,282 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:11:17,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:11:17,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:11:17,299 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store 2024-12-02T14:11:17,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:11:17,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:11:17,312 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:11:17,312 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:11:17,312 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:11:17,312 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:11:17,312 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:11:17,312 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:11:17,312 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
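Everything from the 14:11:16,353 ResourceChecker line onward is the setup for regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling: HBaseTestingUtil starts a fresh minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}, which is what produces the two datanodes, the MiniZooKeeperCluster on client port 56298, the master on port 43047 and the region server on 42993 seen above. A minimal sketch of how a test requests that topology, assuming the stock HBaseTestingUtil / StartMiniClusterOption API from the branch-3 test utilities:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirror the option printed in the log: 1 master, 1 region server, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // ... exercise the cluster through util.getConnection() / util.getAdmin() ...
    } finally {
      // Tear everything down so the next test gets a clean environment.
      util.shutdownMiniCluster();
    }
  }
}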
2024-12-02T14:11:17,313 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148677312Disabling compacts and flushes for region at 1733148677312Disabling writes for close at 1733148677312Writing region close event to WAL at 1733148677312Closed at 1733148677312 2024-12-02T14:11:17,313 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/.initializing 2024-12-02T14:11:17,313 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/WALs/a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:17,316 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C43047%2C1733148677063, suffix=, logDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/WALs/a3a61c9ba14f,43047,1733148677063, archiveDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/oldWALs, maxLogs=10 2024-12-02T14:11:17,317 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C43047%2C1733148677063.1733148677317 2024-12-02T14:11:17,331 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/WALs/a3a61c9ba14f,43047,1733148677063/a3a61c9ba14f%2C43047%2C1733148677063.1733148677317 2024-12-02T14:11:17,337 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39319:39319),(127.0.0.1/127.0.0.1:42965:42965)] 2024-12-02T14:11:17,340 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:11:17,340 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:11:17,340 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,340 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,344 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T14:11:17,344 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:17,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T14:11:17,346 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:11:17,347 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:11:17,348 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:11:17,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:11:17,351 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:11:17,351 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,353 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,353 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,354 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,355 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,355 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:11:17,357 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:11:17,364 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:11:17,364 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838528, jitterRate=0.06624354422092438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:11:17,365 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733148677340Initializing all the Stores at 1733148677341 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148677341Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148677342 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148677342Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148677342Cleaning up temporary data from old regions at 1733148677355 (+13 ms)Region opened successfully at 1733148677365 (+10 ms) 2024-12-02T14:11:17,365 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:11:17,369 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60325b96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:11:17,370 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T14:11:17,370 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:11:17,370 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:11:17,370 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:11:17,371 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T14:11:17,371 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T14:11:17,371 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:11:17,375 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:11:17,376 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:11:17,377 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:11:17,377 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:11:17,378 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:11:17,378 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:11:17,379 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:11:17,382 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:11:17,382 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:11:17,385 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T14:11:17,386 DEBUG 
[master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:11:17,388 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:11:17,388 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:11:17,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:11:17,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:11:17,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,391 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a3a61c9ba14f,43047,1733148677063, sessionid=0x1009b4570130000, setting cluster-up flag (Was=false) 2024-12-02T14:11:17,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,396 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:11:17,397 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:17,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,403 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T14:11:17,404 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:17,409 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:11:17,411 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:11:17,412 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:11:17,412 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T14:11:17,412 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a3a61c9ba14f,43047,1733148677063 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:11:17,413 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:11:17,413 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:11:17,413 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:11:17,413 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:11:17,413 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a3a61c9ba14f:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:11:17,414 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,414 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:11:17,414 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T14:11:17,422 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733148707422 2024-12-02T14:11:17,422 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:11:17,422 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:11:17,422 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:11:17,422 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:11:17,422 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:11:17,422 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:11:17,423 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:11:17,423 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,423 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:11:17,424 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:11:17,425 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:11:17,425 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:11:17,426 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,426 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:11:17,429 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:11:17,429 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:11:17,433 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148677429,5,FailOnTimeoutGroup] 2024-12-02T14:11:17,433 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148677433,5,FailOnTimeoutGroup] 2024-12-02T14:11:17,433 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,433 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:11:17,433 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,433 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-02T14:11:17,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:11:17,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:11:17,451 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:11:17,451 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1 2024-12-02T14:11:17,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:11:17,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:11:17,472 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:11:17,477 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:11:17,479 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:11:17,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:17,479 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:11:17,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:11:17,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:17,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:11:17,482 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:11:17,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:17,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:11:17,484 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:11:17,484 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:17,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:17,485 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:11:17,486 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740 2024-12-02T14:11:17,486 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740 2024-12-02T14:11:17,488 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:11:17,488 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:11:17,488 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-02T14:11:17,489 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:11:17,491 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:11:17,491 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=763408, jitterRate=-0.02927742898464203}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:11:17,492 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733148677472Initializing all the Stores at 1733148677473 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148677473Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148677477 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148677477Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148677477Cleaning up temporary data from old regions at 1733148677488 (+11 ms)Region opened successfully at 1733148677492 (+4 ms) 2024-12-02T14:11:17,492 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:11:17,492 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:11:17,492 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:11:17,492 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:11:17,492 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:11:17,492 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:11:17,492 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148677492Disabling compacts and flushes for region at 1733148677492Disabling writes for close at 1733148677492Writing 
region close event to WAL at 1733148677492Closed at 1733148677492 2024-12-02T14:11:17,494 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:11:17,494 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:11:17,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:11:17,496 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:11:17,497 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:11:17,503 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(746): ClusterId : 3e5932f4-5a63-4b18-aa21-10e523283def 2024-12-02T14:11:17,503 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:11:17,506 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:11:17,506 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:11:17,508 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:11:17,509 DEBUG [RS:0;a3a61c9ba14f:42993 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1153aa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:11:17,525 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a3a61c9ba14f:42993 2024-12-02T14:11:17,525 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:11:17,525 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:11:17,525 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T14:11:17,527 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,43047,1733148677063 with port=42993, startcode=1733148677171 2024-12-02T14:11:17,527 DEBUG [RS:0;a3a61c9ba14f:42993 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:11:17,531 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37607, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:11:17,532 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43047 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:17,532 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43047 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:17,535 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1 2024-12-02T14:11:17,535 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45363 2024-12-02T14:11:17,536 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:11:17,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:11:17,540 DEBUG [RS:0;a3a61c9ba14f:42993 {}] zookeeper.ZKUtil(111): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:17,540 WARN [RS:0;a3a61c9ba14f:42993 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:11:17,540 INFO [RS:0;a3a61c9ba14f:42993 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:11:17,541 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:17,542 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,42993,1733148677171] 2024-12-02T14:11:17,549 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:11:17,554 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:11:17,557 INFO [RS:0;a3a61c9ba14f:42993 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:11:17,557 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T14:11:17,559 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:11:17,560 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:11:17,560 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,561 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,562 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:11:17,562 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:11:17,562 DEBUG [RS:0;a3a61c9ba14f:42993 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:11:17,562 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T14:11:17,562 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,562 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,562 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,563 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,563 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,42993,1733148677171-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:11:17,579 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:11:17,579 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,42993,1733148677171-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,579 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,579 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.Replication(171): a3a61c9ba14f,42993,1733148677171 started 2024-12-02T14:11:17,595 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:17,595 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,42993,1733148677171, RpcServer on a3a61c9ba14f/172.17.0.2:42993, sessionid=0x1009b4570130001 2024-12-02T14:11:17,595 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:11:17,595 DEBUG [RS:0;a3a61c9ba14f:42993 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:17,595 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,42993,1733148677171' 2024-12-02T14:11:17,595 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:11:17,596 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:11:17,596 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:11:17,597 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:11:17,597 DEBUG [RS:0;a3a61c9ba14f:42993 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:17,597 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,42993,1733148677171' 2024-12-02T14:11:17,597 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:11:17,597 DEBUG 
[RS:0;a3a61c9ba14f:42993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:11:17,598 DEBUG [RS:0;a3a61c9ba14f:42993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:11:17,598 INFO [RS:0;a3a61c9ba14f:42993 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:11:17,598 INFO [RS:0;a3a61c9ba14f:42993 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:11:17,648 WARN [a3a61c9ba14f:43047 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:11:17,699 INFO [RS:0;a3a61c9ba14f:42993 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C42993%2C1733148677171, suffix=, logDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171, archiveDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/oldWALs, maxLogs=32 2024-12-02T14:11:17,700 INFO [RS:0;a3a61c9ba14f:42993 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42993%2C1733148677171.1733148677700 2024-12-02T14:11:17,714 INFO [RS:0;a3a61c9ba14f:42993 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148677700 2024-12-02T14:11:17,720 DEBUG [RS:0;a3a61c9ba14f:42993 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42965:42965),(127.0.0.1/127.0.0.1:39319:39319)] 2024-12-02T14:11:17,898 DEBUG [a3a61c9ba14f:43047 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T14:11:17,899 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:17,900 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,42993,1733148677171, state=OPENING 2024-12-02T14:11:17,902 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:11:17,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:11:17,903 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:11:17,903 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:11:17,903 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:11:17,903 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,42993,1733148677171}] 2024-12-02T14:11:17,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:17,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:18,057 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:11:18,058 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50863, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:11:18,063 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:11:18,063 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:11:18,067 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C42993%2C1733148677171.meta, suffix=.meta, logDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171, archiveDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/oldWALs, maxLogs=32 2024-12-02T14:11:18,067 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42993%2C1733148677171.meta.1733148678067.meta 2024-12-02T14:11:18,077 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.meta.1733148678067.meta 2024-12-02T14:11:18,088 DEBUG 
[RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42965:42965),(127.0.0.1/127.0.0.1:39319:39319)] 2024-12-02T14:11:18,092 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:11:18,093 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:11:18,093 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T14:11:18,093 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T14:11:18,093 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:11:18,093 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:11:18,093 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:11:18,093 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:11:18,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:11:18,098 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:11:18,098 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:18,099 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:18,099 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:11:18,100 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:11:18,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:18,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:18,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:11:18,101 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:11:18,101 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:18,101 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:18,101 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:11:18,102 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:11:18,102 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:18,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:11:18,103 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:11:18,103 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740 2024-12-02T14:11:18,104 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740 2024-12-02T14:11:18,106 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:11:18,106 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:11:18,106 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
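The store-level numbers printed above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, and the 16.0 M per-family flush fallback) come from tunable settings. A minimal sketch, assuming the stock HBase configuration keys for compaction file selection; it is illustrative only and is not part of this test. The per-column-family flush lower bound is, per the log message above, looked up in the table descriptor rather than in the site configuration, so it is only noted in a comment.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Compaction selection bounds and ratio; the values below are the defaults
    // echoed in the CompactionConfiguration line above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // The per-family flush lower bound ("hbase.hregion.percolumnfamilyflush.size.lower.bound")
    // is read from the table descriptor; when unset, HBase falls back to the region
    // memstore flush size divided by the number of families (the 16.0 M shown above).
  }
}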
2024-12-02T14:11:18,107 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:11:18,108 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=883341, jitterRate=0.12322671711444855}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:11:18,108 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:11:18,109 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733148678093Writing region info on filesystem at 1733148678093Initializing all the Stores at 1733148678094 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148678094Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148678097 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148678097Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148678097Cleaning up temporary data from old regions at 1733148678106 (+9 ms)Running coprocessor post-open hooks at 1733148678108 (+2 ms)Region opened successfully at 1733148678109 (+1 ms) 2024-12-02T14:11:18,110 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733148678056 2024-12-02T14:11:18,113 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:11:18,113 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:11:18,114 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:18,116 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,42993,1733148677171, state=OPEN 2024-12-02T14:11:18,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:11:18,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:11:18,119 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:18,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:11:18,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:11:18,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:11:18,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,42993,1733148677171 in 217 msec 2024-12-02T14:11:18,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T14:11:18,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 630 msec 2024-12-02T14:11:18,128 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:11:18,128 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:11:18,129 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:11:18,129 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,42993,1733148677171, seqNum=-1] 2024-12-02T14:11:18,129 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:11:18,131 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37639, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:11:18,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 725 msec 2024-12-02T14:11:18,137 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733148678137, completionTime=-1 2024-12-02T14:11:18,137 INFO 
[master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T14:11:18,137 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:11:18,139 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T14:11:18,139 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733148738139 2024-12-02T14:11:18,139 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733148798139 2024-12-02T14:11:18,140 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T14:11:18,140 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43047,1733148677063-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:18,140 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43047,1733148677063-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:18,140 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43047,1733148677063-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:18,140 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a3a61c9ba14f:43047, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:18,140 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:18,141 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:18,143 DEBUG [master/a3a61c9ba14f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T14:11:18,146 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.928sec 2024-12-02T14:11:18,146 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T14:11:18,146 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T14:11:18,146 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T14:11:18,146 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
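The ChoreService entries above register periodic background tasks on the active master. A rough sketch, assuming the public ScheduledChore/ChoreService API, of how such a chore is defined and scheduled; the chore name, stopper, and period are illustrative placeholders, not the master's actual wiring.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Minimal Stoppable so the chore can be cancelled cooperatively.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    // Period defaults to milliseconds, matching the "period=60000, unit=MILLISECONDS"
    // entries logged above.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60000) {
      @Override protected void chore() {
        // periodic work goes here
      }
    };
    service.scheduleChore(chore);
  }
}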
2024-12-02T14:11:18,147 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T14:11:18,147 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43047,1733148677063-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:11:18,147 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43047,1733148677063-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T14:11:18,150 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T14:11:18,150 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T14:11:18,150 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43047,1733148677063-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:11:18,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:18,205 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65345c29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:11:18,205 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a3a61c9ba14f,43047,-1 for getting cluster id 2024-12-02T14:11:18,205 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:11:18,208 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3e5932f4-5a63-4b18-aa21-10e523283def' 2024-12-02T14:11:18,208 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:11:18,209 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3e5932f4-5a63-4b18-aa21-10e523283def" 2024-12-02T14:11:18,209 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3b927c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:11:18,209 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a3a61c9ba14f,43047,-1] 2024-12-02T14:11:18,209 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:11:18,209 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:11:18,211 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37900, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:11:18,213 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b4013d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:11:18,213 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:11:18,215 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,42993,1733148677171, seqNum=-1] 2024-12-02T14:11:18,216 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:11:18,218 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51406, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:11:18,219 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:18,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:11:18,222 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T14:11:18,223 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T14:11:18,225 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is a3a61c9ba14f,43047,1733148677063 2024-12-02T14:11:18,225 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@860441f 2024-12-02T14:11:18,225 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T14:11:18,227 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37916, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T14:11:18,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T14:11:18,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-02T14:11:18,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:11:18,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T14:11:18,232 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T14:11:18,232 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:18,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-02T14:11:18,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:11:18,235 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T14:11:18,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741835_1011 (size=405) 2024-12-02T14:11:18,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741835_1011 (size=405) 2024-12-02T14:11:18,249 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => daf7958a964b19f953e8319bc097332e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1 2024-12-02T14:11:18,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741836_1012 (size=88) 2024-12-02T14:11:18,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741836_1012 (size=88) 2024-12-02T14:11:18,657 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:11:18,657 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing daf7958a964b19f953e8319bc097332e, disabling compactions & flushes 2024-12-02T14:11:18,658 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:11:18,658 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:11:18,658 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. after waiting 0 ms 2024-12-02T14:11:18,658 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 
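For reference, a hedged sketch (not the test's own code) of creating a table equivalent to the descriptor logged above: a single 'info' family with VERSIONS => '1', plus the deliberately small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) that trigger the TableDescriptorChecker warnings. Connection details are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)      // VERSIONS => '1' in the logged descriptor
              .build())
          .setMaxFileSize(786432)     // small on purpose: provokes the MAX_FILESIZE warning
          .setMemStoreFlushSize(8192) // small on purpose: provokes the MEMSTORE_FLUSHSIZE warning
          .build();
      // Surfaces in the log as CreateTableProcedure pid=4.
      admin.createTable(td);
    }
  }
}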
2024-12-02T14:11:18,658 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:11:18,658 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for daf7958a964b19f953e8319bc097332e: Waiting for close lock at 1733148678657Disabling compacts and flushes for region at 1733148678657Disabling writes for close at 1733148678658 (+1 ms)Writing region close event to WAL at 1733148678658Closed at 1733148678658 2024-12-02T14:11:18,660 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T14:11:18,661 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733148678661"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733148678661"}]},"ts":"1733148678661"} 2024-12-02T14:11:18,664 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-02T14:11:18,667 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T14:11:18,667 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148678667"}]},"ts":"1733148678667"} 2024-12-02T14:11:18,670 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-02T14:11:18,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=daf7958a964b19f953e8319bc097332e, ASSIGN}] 2024-12-02T14:11:18,672 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=daf7958a964b19f953e8319bc097332e, ASSIGN 2024-12-02T14:11:18,674 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=daf7958a964b19f953e8319bc097332e, ASSIGN; state=OFFLINE, location=a3a61c9ba14f,42993,1733148677171; forceNewPlan=false, retain=false 2024-12-02T14:11:18,825 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=daf7958a964b19f953e8319bc097332e, regionState=OPENING, regionLocation=a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:18,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=daf7958a964b19f953e8319bc097332e, ASSIGN because future has completed 2024-12-02T14:11:18,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure daf7958a964b19f953e8319bc097332e, server=a3a61c9ba14f,42993,1733148677171}] 2024-12-02T14:11:18,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:18,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:18,990 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 
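The repeated "Failed invocation ... Filesystem closed" warnings above come from WAL lease recovery probing a DistributedFileSystem whose client has already been shut down (DFSClient.checkOpen throws once the filesystem is closed). A reduced sketch, assuming the standard HDFS client API, of the two calls visible in the stack trace (recoverLease, then polling isFileClosed); the NameNode URI and path are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; the traces above reference hdfs://localhost:33497.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33497"), conf);
    Path wal = new Path(args[0]); // WAL file whose lease should be recovered
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the NameNode to start lease recovery; returns true if the file is
      // already closed and nothing needs to be done.
      boolean closed = dfs.recoverLease(wal);
      while (!closed) {
        Thread.sleep(1000);
        // Poll until block recovery completes; this is the isFileClosed() call
        // that fails in the log because the underlying DFSClient was closed.
        closed = dfs.isFileClosed(wal);
      }
    }
  }
}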
2024-12-02T14:11:18,991 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => daf7958a964b19f953e8319bc097332e, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:11:18,991 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,991 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:11:18,991 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,991 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,993 INFO [StoreOpener-daf7958a964b19f953e8319bc097332e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,995 INFO [StoreOpener-daf7958a964b19f953e8319bc097332e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region daf7958a964b19f953e8319bc097332e columnFamilyName info 2024-12-02T14:11:18,995 DEBUG [StoreOpener-daf7958a964b19f953e8319bc097332e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:11:18,996 INFO [StoreOpener-daf7958a964b19f953e8319bc097332e-1 {}] regionserver.HStore(327): Store=daf7958a964b19f953e8319bc097332e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:11:18,996 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,997 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,998 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,999 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:18,999 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:19,001 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:19,005 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:11:19,006 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened daf7958a964b19f953e8319bc097332e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731062, jitterRate=-0.07040762901306152}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:11:19,006 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for daf7958a964b19f953e8319bc097332e 2024-12-02T14:11:19,007 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for daf7958a964b19f953e8319bc097332e: Running coprocessor pre-open hook at 1733148678991Writing region info on filesystem at 1733148678991Initializing all the Stores at 1733148678993 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148678993Cleaning up temporary data from old regions at 1733148678999 (+6 ms)Running coprocessor post-open hooks at 1733148679006 (+7 ms)Region opened successfully at 1733148679007 (+1 ms) 2024-12-02T14:11:19,009 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e., pid=6, masterSystemTime=1733148678983 2024-12-02T14:11:19,013 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:11:19,013 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:11:19,014 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=daf7958a964b19f953e8319bc097332e, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,42993,1733148677171 2024-12-02T14:11:19,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure daf7958a964b19f953e8319bc097332e, server=a3a61c9ba14f,42993,1733148677171 because future has completed 2024-12-02T14:11:19,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T14:11:19,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure daf7958a964b19f953e8319bc097332e, server=a3a61c9ba14f,42993,1733148677171 in 189 msec 2024-12-02T14:11:19,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T14:11:19,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=daf7958a964b19f953e8319bc097332e, ASSIGN in 352 msec 2024-12-02T14:11:19,025 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T14:11:19,025 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148679025"}]},"ts":"1733148679025"} 2024-12-02T14:11:19,028 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-02T14:11:19,029 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T14:11:19,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 801 msec 2024-12-02T14:11:19,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:19,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:19,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:20,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:20,227 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:11:20,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:11:20,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:20,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:21,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:21,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:21,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:22,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:22,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:22,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:23,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:23,549 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T14:11:23,550 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-02T14:11:23,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:23,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:24,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:24,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T14:11:24,724 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T14:11:24,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:11:24,724 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T14:11:24,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T14:11:24,724 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T14:11:24,725 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T14:11:24,725 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T14:11:24,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:24,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:25,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:25,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:25,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:26,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:26,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:26,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:27,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:27,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:27,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:28,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-02T14:11:28,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-02T14:11:28,318 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-02T14:11:28,318 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-12-02T14:11:28,322 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:28,323 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
2024-12-02T14:11:28,326 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e., hostname=a3a61c9ba14f,42993,1733148677171, seqNum=2]
2024-12-02T14:11:28,336 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:28,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:28,344 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-02T14:11:28,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-02T14:11:28,346 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T14:11:28,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T14:11:28,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-02T14:11:28,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
2024-12-02T14:11:28,509 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing daf7958a964b19f953e8319bc097332e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-02T14:11:28,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/1ee0b9ab862845708e2a9ec653d2bc35 is 1080, key is row0001/info:/1733148688328/Put/seqid=0
2024-12-02T14:11:28,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741837_1013 (size=6033)
2024-12-02T14:11:28,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741837_1013 (size=6033)
2024-12-02T14:11:28,530 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/1ee0b9ab862845708e2a9ec653d2bc35
2024-12-02T14:11:28,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/1ee0b9ab862845708e2a9ec653d2bc35 as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/1ee0b9ab862845708e2a9ec653d2bc35
2024-12-02T14:11:28,542 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/1ee0b9ab862845708e2a9ec653d2bc35, entries=1, sequenceid=5, filesize=5.9 K
2024-12-02T14:11:28,543 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for daf7958a964b19f953e8319bc097332e in 34ms, sequenceid=5, compaction requested=false
2024-12-02T14:11:28,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for daf7958a964b19f953e8319bc097332e:
2024-12-02T14:11:28,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
2024-12-02T14:11:28,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-12-02T14:11:28,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-12-02T14:11:28,552 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-02T14:11:28,552 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec
2024-12-02T14:11:28,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 215 msec
2024-12-02T14:11:28,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-02T14:11:28,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:29,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:29,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:29,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:30,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:30,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:30,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:31,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:31,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 after 68066ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:11:31,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:31,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:32,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:32,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:32,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:33,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:33,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:33,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:34,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:34,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:34,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:35,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:35,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:35,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:36,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:36,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:36,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:37,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:37,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:37,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:38,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-02T14:11:38,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-02T14:11:38,378 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-02T14:11:38,381 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:38,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:38,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-02T14:11:38,384 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-02T14:11:38,385 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T14:11:38,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T14:11:38,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-02T14:11:38,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
2024-12-02T14:11:38,541 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing daf7958a964b19f953e8319bc097332e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-02T14:11:38,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/2a0ea66ad5194c09af43f000d4e25fe3 is 1080, key is row0002/info:/1733148698379/Put/seqid=0
2024-12-02T14:11:38,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741838_1014 (size=6033)
2024-12-02T14:11:38,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741838_1014 (size=6033)
2024-12-02T14:11:38,559 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/2a0ea66ad5194c09af43f000d4e25fe3
2024-12-02T14:11:38,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/2a0ea66ad5194c09af43f000d4e25fe3 as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/2a0ea66ad5194c09af43f000d4e25fe3
2024-12-02T14:11:38,570 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/2a0ea66ad5194c09af43f000d4e25fe3, entries=1, sequenceid=9, filesize=5.9 K
2024-12-02T14:11:38,571 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for daf7958a964b19f953e8319bc097332e in 31ms, sequenceid=9, compaction requested=false
2024-12-02T14:11:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for daf7958a964b19f953e8319bc097332e:
2024-12-02T14:11:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
2024-12-02T14:11:38,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-12-02T14:11:38,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-12-02T14:11:38,575 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-12-02T14:11:38,575 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 188 msec
2024-12-02T14:11:38,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 195 msec
2024-12-02T14:11:38,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-02T14:11:38,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:38,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 after 68065ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T14:11:38,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta after 68052ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T14:11:39,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:39,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:39,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-12-02T14:11:40,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:40,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:40,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:41,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:41,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:41,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:42,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:42,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:42,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:43,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:43,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:43,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:44,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:44,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:44,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:45,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:45,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:45,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:46,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:46,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:46,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:47,027 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-02T14:11:47,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
2024-12-02T14:11:47,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
2024-12-02T14:11:47,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta
2024-12-02T14:11:48,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043
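Each of the warnings above records the same failure as the stack trace shown in full earlier: RecoverLeaseFSUtils checks whether the NameNode already considers the WAL file closed by reflectively invoking DistributedFileSystem.isFileClosed (the method is not part of the generic FileSystem API), and every probe fails because the mini-cluster's DFSClient has already been shut down, so DFSClient.checkOpen throws IOException("Filesystem closed"), which surfaces as the wrapped InvocationTargetException. The sketch below is a minimal illustration of that reflective probe-and-retry pattern, not the HBase implementation; the class name IsFileClosedProbe, the retry count, and the sleep interval are assumptions chosen for the example.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal sketch (not the HBase code): poll FileSystem#isFileClosed via reflection,
// treating a missing method or a failed invocation as "cannot tell yet".
public final class IsFileClosedProbe {

  public static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem declares isFileClosed(Path); plain FileSystem does not,
      // which is why the probe looks the method up reflectively on the runtime class.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // method unavailable on this FileSystem implementation
    } catch (InvocationTargetException e) {
      // e.getCause() carries the real failure, e.g. IOException("Filesystem closed")
      // once the DFSClient behind the FileSystem has been shut down.
      return false;
    }
  }

  public static void main(String[] args) throws IOException, InterruptedException {
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path wal = new Path(args[0]); // hypothetical WAL path passed on the command line
    // Retry roughly once per second, as the warnings above do, until the file reports closed.
    for (int attempt = 0; attempt < 10 && !isFileClosed(fs, wal); attempt++) {
      Thread.sleep(1000L);
    }
  }
}

The point of the sketch is only that the underlying IOException is delivered wrapped in an InvocationTargetException, which is exactly what the "Caused by: java.io.IOException: Filesystem closed" lines in the traces above show.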
2024-12-02T14:11:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-02T14:11:48,419 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-02T14:11:48,422 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42993%2C1733148677171.1733148708421
2024-12-02T14:11:48,427 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:48,427 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:48,427 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:48,427 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:48,427 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:48,427 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148677700 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148708421
2024-12-02T14:11:48,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741833_1009 (size=5546)
2024-12-02T14:11:48,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741833_1009 (size=5546)
2024-12-02T14:11:48,437 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42965:42965),(127.0.0.1/127.0.0.1:39319:39319)]
2024-12-02T14:11:48,437 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148677700 is not closed yet, will try archiving it next time
2024-12-02T14:11:48,438 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:48,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:48,441 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-02T14:11:48,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-02T14:11:48,442 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T14:11:48,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-02T14:11:48,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-12-02T14:11:48,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:11:48,597 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing daf7958a964b19f953e8319bc097332e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T14:11:48,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a is 1080, key is row0003/info:/1733148708420/Put/seqid=0 2024-12-02T14:11:48,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741840_1016 (size=6033) 2024-12-02T14:11:48,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741840_1016 (size=6033) 2024-12-02T14:11:48,617 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a 2024-12-02T14:11:48,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a 2024-12-02T14:11:48,633 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a, entries=1, sequenceid=13, filesize=5.9 K 2024-12-02T14:11:48,634 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
daf7958a964b19f953e8319bc097332e in 38ms, sequenceid=13, compaction requested=true 2024-12-02T14:11:48,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for daf7958a964b19f953e8319bc097332e: 2024-12-02T14:11:48,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:11:48,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-02T14:11:48,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-02T14:11:48,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-02T14:11:48,639 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 194 msec 2024-12-02T14:11:48,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 203 msec 2024-12-02T14:11:48,833 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148677700 to hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/oldWALs/a3a61c9ba14f%2C42993%2C1733148677171.1733148677700 2024-12-02T14:11:48,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:48,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:49,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:49,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:49,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:50,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:50,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:50,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:51,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:51,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:51,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:52,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:52,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:52,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:53,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:53,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:53,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:54,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:54,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:54,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:55,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:55,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:55,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:56,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:56,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:56,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:57,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:57,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-02T14:11:57,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-02T14:11:58,177 INFO [master/a3a61c9ba14f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-02T14:11:58,177 INFO [master/a3a61c9ba14f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
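Editorial note: the "Failed invocation ... Filesystem closed" warnings that repeat throughout this section (one per WAL file, roughly once a second per the timestamps) come from RecoverLeaseFSUtils probing whether an HDFS file is already closed while trying to recover its lease; the probe calls isFileClosed reflectively, and once the test's DFSClient has been shut down every retry surfaces as an InvocationTargetException wrapping IOException("Filesystem closed"). Below is a minimal, self-contained sketch of that reflective probe pattern. It is not the actual HBase source; the class name and the placeholder path are hypothetical, while DistributedFileSystem.isFileClosed(Path) is a real Hadoop API that is simply not declared on the generic FileSystem class, which is why it is looked up by reflection.

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical illustration of the reflective isFileClosed() probe behind the
    // warnings above; not the HBase implementation.
    public final class IsFileClosedProbe {

        // Returns true only when the probe succeeds and reports the file as closed.
        static boolean probeIsFileClosed(FileSystem fs, Path walFile) {
            try {
                Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) isFileClosed.invoke(fs, walFile);
            } catch (NoSuchMethodException e) {
                return false; // this FileSystem implementation has no isFileClosed()
            } catch (IllegalAccessException | InvocationTargetException e) {
                // When the DFSClient behind the FileSystem has already been closed, the
                // reflective call fails exactly like the log: an InvocationTargetException
                // whose cause is IOException("Filesystem closed").
                System.err.println("Failed invocation for " + walFile + ": " + e.getCause());
                return false;
            }
        }

        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.get(new Configuration());
            // Placeholder path; in the log this would be one of the WAL files under /WALs.
            Path wal = new Path(args.length > 0 ? args[0] : "/tmp/example-wal");
            System.out.println(probeIsFileClosed(fs, wal));
        }
    }

The retry loop in the log keeps repeating this probe for each WAL it is still trying to close, which is why the same three file names cycle through the warnings with advancing timestamps.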
2024-12-02T14:11:58,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-02T14:11:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-02T14:11:58,529 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-02T14:11:58,529 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T14:11:58,530 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T14:11:58,530 DEBUG [Time-limited test {}] regionserver.HStore(1541): daf7958a964b19f953e8319bc097332e/info is initiating minor compaction (all files)
2024-12-02T14:11:58,530 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-02T14:11:58,530 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-02T14:11:58,530 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of daf7958a964b19f953e8319bc097332e/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
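Editorial note: the SortedCompactionPolicy / ExploringCompactionPolicy lines above record the region server picking 3 eligible store files (18099 bytes in total) as the compaction input. Below is a simplified sketch of the kind of sliding-window, ratio-bounded selection those names suggest; it is not the HBase implementation, and the 1.2 ratio and the file sizes in main() are assumptions chosen to mirror this log, not values read from it.

    import java.util.Arrays;

    // Simplified sketch of an "exploring" style selection: slide a window over the
    // store files (oldest first), keep only windows where no single file dwarfs the
    // rest, and prefer the window that compacts the most files for the fewest bytes.
    public final class ExploringSelectionSketch {

        // Ratio test: every file must be at most `ratio` times the sum of the others.
        static boolean filesInRatio(long[] window, double ratio) {
            long total = 0;
            for (long size : window) total += size;
            for (long size : window) {
                if (size > (total - size) * ratio) return false;
            }
            return true;
        }

        static long[] select(long[] sizes, int minFiles, int maxFiles, double ratio) {
            long[] best = new long[0];
            long bestTotal = Long.MAX_VALUE;
            for (int start = 0; start < sizes.length; start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.length, start + maxFiles); end++) {
                    long[] window = Arrays.copyOfRange(sizes, start, end);
                    if (!filesInRatio(window, ratio)) continue;
                    long total = Arrays.stream(window).sum();
                    // Prefer more files, then the cheaper (smaller) rewrite.
                    if (window.length > best.length || (window.length == best.length && total < bestTotal)) {
                        best = window;
                        bestTotal = total;
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Three ~6 KB flush files like the 5.9 K inputs in this log, plus one
            // oversized file that the ratio test should exclude.
            long[] storeFiles = {6033, 6033, 6033, 100_000};
            long[] chosen = select(storeFiles, 3, 10, 1.2);
            System.out.println(chosen.length + " files selected, total "
                    + Arrays.stream(chosen).sum() + " bytes");
        }
    }

Run as written, the oversized fourth file is rejected because it is more than 1.2 times the rest, so the sketch settles on the three small files, which is the shape of decision the "3 files of size 18099" line reflects.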
2024-12-02T14:11:58,530 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/1ee0b9ab862845708e2a9ec653d2bc35, hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/2a0ea66ad5194c09af43f000d4e25fe3, hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a] into tmpdir=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp, totalSize=17.7 K
2024-12-02T14:11:58,531 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1ee0b9ab862845708e2a9ec653d2bc35, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733148688328
2024-12-02T14:11:58,531 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2a0ea66ad5194c09af43f000d4e25fe3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733148698379
2024-12-02T14:11:58,532 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b0c4b1f50e0d4a1e85b9a70f2ea6a83a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733148708420
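Editorial note, a quick consistency check on the sizes reported above: each of the three inputs is a single-row flush file of "5.9 K", and if each one is 6,033 bytes (an assumption taken from the identical single-row flush file committed later in this log, whose block is reported as size=6033), then 3 x 6,033 = 18,099 bytes, which is exactly the "3 files of size 18099" chosen by the exploring policy and, divided by 1,024, the "totalSize=17.7 K" reported here.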
2024-12-02T14:11:58,541 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): daf7958a964b19f953e8319bc097332e#info#compaction#44 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-02T14:11:58,542 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/5a1c898e39054de781036ed22ca00cc5 is 1080, key is row0001/info:/1733148688328/Put/seqid=0
2024-12-02T14:11:58,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741841_1017 (size=8296)
2024-12-02T14:11:58,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741841_1017 (size=8296)
2024-12-02T14:11:58,552 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/5a1c898e39054de781036ed22ca00cc5 as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/5a1c898e39054de781036ed22ca00cc5
2024-12-02T14:11:58,560 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in daf7958a964b19f953e8319bc097332e/info of daf7958a964b19f953e8319bc097332e into 5a1c898e39054de781036ed22ca00cc5(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T14:11:58,560 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for daf7958a964b19f953e8319bc097332e:
2024-12-02T14:11:58,562 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42993%2C1733148677171.1733148718562
2024-12-02T14:11:58,568 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:58,569 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:58,569 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:58,569 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:58,569 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-02T14:11:58,569 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148708421 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148718562
2024-12-02T14:11:58,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741839_1015 (size=2520)
2024-12-02T14:11:58,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741839_1015 (size=2520)
2024-12-02T14:11:58,572 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39319:39319),(127.0.0.1/127.0.0.1:42965:42965)]
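Editorial note: the "Rolled WAL ... new WAL ..." and "Create new FSHLog writer with pipeline" lines are the region server closing its current write-ahead log file and opening a new one on a fresh HDFS pipeline. In this test the roll is driven internally, but the same roll can be requested through the public HBase client API; a hedged sketch follows (the server name is taken from the WAL path in this log purely for illustration, and the connection configuration is left to defaults).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: ask one region server to roll its write-ahead log, the client-visible
    // equivalent of the roll recorded above.
    public final class RollWalExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // host,port,startcode form, as it appears in the WALs directory name
                ServerName rs = ServerName.valueOf("a3a61c9ba14f,42993,1733148677171");
                admin.rollWALWriter(rs); // closes the current WAL file and starts a new one
            }
        }
    }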
2024-12-02T14:11:58,572 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:58,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-02T14:11:58,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-02T14:11:58,575 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-02T14:11:58,575 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-02T14:11:58,576 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-02T14:11:58,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-02T14:11:58,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
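Editorial note: pid=13 and pid=14 above are the master-side FlushTableProcedure and the per-region FlushRegionProcedure it spawns, created because a client asked for a table flush ("Client=jenkins//172.17.0.2 flush ..."). From the client side that whole chain is a single Admin call; a hedged sketch follows (connection settings are defaults, only the table name is taken from this log).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: the client call behind the FlushTableProcedure stored above. The call
    // returns once the master reports the flush procedure as finished; the repeated
    // "Checking to see if procedure is done pid=13" lines are that completion polling.
    public final class FlushTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
            }
        }
    }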
2024-12-02T14:11:58,729 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing daf7958a964b19f953e8319bc097332e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-02T14:11:58,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/ba7b13030ce94d5290bf3a3047ad9552 is 1080, key is row0000/info:/1733148718561/Put/seqid=0
2024-12-02T14:11:58,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741843_1019 (size=6033)
2024-12-02T14:11:58,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741843_1019 (size=6033)
2024-12-02T14:11:58,741 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/ba7b13030ce94d5290bf3a3047ad9552
2024-12-02T14:11:58,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/ba7b13030ce94d5290bf3a3047ad9552 as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/ba7b13030ce94d5290bf3a3047ad9552
2024-12-02T14:11:58,752 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/ba7b13030ce94d5290bf3a3047ad9552, entries=1, sequenceid=18, filesize=5.9 K
2024-12-02T14:11:58,753 INFO [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for daf7958a964b19f953e8319bc097332e in 24ms, sequenceid=18, compaction requested=false
2024-12-02T14:11:58,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for daf7958a964b19f953e8319bc097332e:
2024-12-02T14:11:58,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.
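Editorial note: the flush above writes the new HFile under the region's .tmp directory ("Flushed memstore ... to=.../.tmp/info/ba7b...") and only then commits it by moving it into the store directory ("Committing ... as .../info/ba7b..."), so readers never observe a half-written file. Below is a minimal sketch of that write-to-temp-then-rename pattern using the plain Hadoop FileSystem API; the paths are illustrative placeholders, not the ones from this test.

    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of the commit pattern used by the flush above: finish the file in a
    // temporary location, then publish it with a single rename.
    public final class TmpThenRename {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(new Configuration());
            Path tmp = new Path("/data/default/ExampleTable/region/.tmp/info/newfile");
            Path dst = new Path("/data/default/ExampleTable/region/info/newfile");

            try (FSDataOutputStream out = fs.create(tmp)) {
                out.write("example cell data".getBytes(StandardCharsets.UTF_8));
            } // the file is fully written and closed before it becomes visible at dst

            fs.mkdirs(dst.getParent());
            if (!fs.rename(tmp, dst)) { // on HDFS, rename is a cheap metadata-only move
                throw new java.io.IOException("commit failed: could not rename " + tmp + " to " + dst);
            }
        }
    }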
2024-12-02T14:11:58,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-02T14:11:58,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-02T14:11:58,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-02T14:11:58,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-12-02T14:11:58,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-12-02T14:11:58,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-12-02T14:11:58,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:59,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:11:59,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:11:59,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:00,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:00,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:00,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:01,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:01,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:01,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:02,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:02,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:02,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:03,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:03,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:03,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:03,991 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region daf7958a964b19f953e8319bc097332e, had cached 0 bytes from a total of 14329 2024-12-02T14:12:04,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:04,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:04,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:05,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:05,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:05,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:06,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:06,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:06,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:07,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:07,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:07,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:08,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:08,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43047 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-02T14:12:08,578 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-02T14:12:08,581 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C42993%2C1733148677171.1733148728580 2024-12-02T14:12:08,586 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,587 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,587 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148718562 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148728580 2024-12-02T14:12:08,588 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39319:39319),(127.0.0.1/127.0.0.1:42965:42965)] 2024-12-02T14:12:08,588 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148718562 is not closed yet, will 
try archiving it next time 2024-12-02T14:12:08,588 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148708421 to hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/oldWALs/a3a61c9ba14f%2C42993%2C1733148677171.1733148708421 2024-12-02T14:12:08,588 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T14:12:08,588 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T14:12:08,588 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:12:08,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:08,589 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:08,589 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T14:12:08,589 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T14:12:08,589 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=777818401, stopped=false 2024-12-02T14:12:08,589 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a3a61c9ba14f,43047,1733148677063 2024-12-02T14:12:08,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741842_1018 (size=2026) 2024-12-02T14:12:08,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741842_1018 (size=2026) 2024-12-02T14:12:08,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:08,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:08,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:08,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:08,591 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:12:08,591 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T14:12:08,591 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:12:08,591 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-12-02T14:12:08,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:08,591 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,42993,1733148677171' ***** 2024-12-02T14:12:08,591 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T14:12:08,591 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T14:12:08,592 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(3091): Received CLOSE for daf7958a964b19f953e8319bc097332e 2024-12-02T14:12:08,592 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,42993,1733148677171 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a3a61c9ba14f:42993. 2024-12-02T14:12:08,592 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing daf7958a964b19f953e8319bc097332e, disabling compactions & flushes 2024-12-02T14:12:08,592 DEBUG [RS:0;a3a61c9ba14f:42993 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:12:08,592 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:12:08,592 DEBUG [RS:0;a3a61c9ba14f:42993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:08,592 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:12:08,592 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. after waiting 0 ms 2024-12-02T14:12:08,592 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:12:08,592 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T14:12:08,592 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing daf7958a964b19f953e8319bc097332e 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-02T14:12:08,593 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:12:08,593 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T14:12:08,593 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1325): Online Regions={daf7958a964b19f953e8319bc097332e=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T14:12:08,593 DEBUG [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, daf7958a964b19f953e8319bc097332e 2024-12-02T14:12:08,593 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:12:08,593 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:12:08,593 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:12:08,593 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:12:08,593 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:12:08,593 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-02T14:12:08,597 DEBUG 
[RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/d6b831753b884858834b38e6f9714fe1 is 1080, key is row0001/info:/1733148728579/Put/seqid=0 2024-12-02T14:12:08,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741845_1021 (size=6033) 2024-12-02T14:12:08,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741845_1021 (size=6033) 2024-12-02T14:12:08,604 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/d6b831753b884858834b38e6f9714fe1 2024-12-02T14:12:08,610 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/info/02a3198401ba49d2ab42a93ac61d787c is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e./info:regioninfo/1733148679014/Put/seqid=0 2024-12-02T14:12:08,610 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-02T14:12:08,610 INFO [regionserver/a3a61c9ba14f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-02T14:12:08,611 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/.tmp/info/d6b831753b884858834b38e6f9714fe1 as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/d6b831753b884858834b38e6f9714fe1 2024-12-02T14:12:08,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741846_1022 (size=7308) 2024-12-02T14:12:08,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741846_1022 (size=7308) 2024-12-02T14:12:08,616 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/info/02a3198401ba49d2ab42a93ac61d787c 2024-12-02T14:12:08,618 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/d6b831753b884858834b38e6f9714fe1, entries=1, sequenceid=22, filesize=5.9 K 2024-12-02T14:12:08,619 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for daf7958a964b19f953e8319bc097332e in 27ms, sequenceid=22, compaction requested=true 2024-12-02T14:12:08,620 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/1ee0b9ab862845708e2a9ec653d2bc35, hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/2a0ea66ad5194c09af43f000d4e25fe3, hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a] to archive 2024-12-02T14:12:08,621 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T14:12:08,623 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/1ee0b9ab862845708e2a9ec653d2bc35 to hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/1ee0b9ab862845708e2a9ec653d2bc35 2024-12-02T14:12:08,624 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/2a0ea66ad5194c09af43f000d4e25fe3 to hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/2a0ea66ad5194c09af43f000d4e25fe3 2024-12-02T14:12:08,626 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a to 
hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/info/b0c4b1f50e0d4a1e85b9a70f2ea6a83a 2024-12-02T14:12:08,626 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a3a61c9ba14f:43047 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-02T14:12:08,626 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1ee0b9ab862845708e2a9ec653d2bc35=6033, 2a0ea66ad5194c09af43f000d4e25fe3=6033, b0c4b1f50e0d4a1e85b9a70f2ea6a83a=6033] 2024-12-02T14:12:08,631 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/daf7958a964b19f953e8319bc097332e/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-02T14:12:08,631 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:12:08,632 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for daf7958a964b19f953e8319bc097332e: Waiting for close lock at 1733148728592Running coprocessor pre-close hooks at 1733148728592Disabling compacts and flushes for region at 1733148728592Disabling writes for close at 1733148728592Obtaining lock to block concurrent updates at 1733148728593 (+1 ms)Preparing flush snapshotting stores in daf7958a964b19f953e8319bc097332e at 1733148728593Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733148728593Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 
at 1733148728593Flushing daf7958a964b19f953e8319bc097332e/info: creating writer at 1733148728594 (+1 ms)Flushing daf7958a964b19f953e8319bc097332e/info: appending metadata at 1733148728596 (+2 ms)Flushing daf7958a964b19f953e8319bc097332e/info: closing flushed file at 1733148728596Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1150bfe1: reopening flushed file at 1733148728610 (+14 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for daf7958a964b19f953e8319bc097332e in 27ms, sequenceid=22, compaction requested=true at 1733148728620 (+10 ms)Writing region close event to WAL at 1733148728627 (+7 ms)Running coprocessor post-close hooks at 1733148728631 (+4 ms)Closed at 1733148728631 2024-12-02T14:12:08,632 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733148678227.daf7958a964b19f953e8319bc097332e. 2024-12-02T14:12:08,638 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/ns/9579ff895027454987854945cc98018e is 43, key is default/ns:d/1733148678131/Put/seqid=0 2024-12-02T14:12:08,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741847_1023 (size=5153) 2024-12-02T14:12:08,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741847_1023 (size=5153) 2024-12-02T14:12:08,646 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/ns/9579ff895027454987854945cc98018e 2024-12-02T14:12:08,669 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/table/d8d80deb89054b9c9f44095e5892b0de is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733148679025/Put/seqid=0 2024-12-02T14:12:08,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741848_1024 (size=5508) 2024-12-02T14:12:08,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741848_1024 (size=5508) 2024-12-02T14:12:08,685 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/table/d8d80deb89054b9c9f44095e5892b0de 2024-12-02T14:12:08,691 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/info/02a3198401ba49d2ab42a93ac61d787c as 
hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/info/02a3198401ba49d2ab42a93ac61d787c 2024-12-02T14:12:08,696 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/info/02a3198401ba49d2ab42a93ac61d787c, entries=10, sequenceid=11, filesize=7.1 K 2024-12-02T14:12:08,697 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/ns/9579ff895027454987854945cc98018e as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/ns/9579ff895027454987854945cc98018e 2024-12-02T14:12:08,701 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/ns/9579ff895027454987854945cc98018e, entries=2, sequenceid=11, filesize=5.0 K 2024-12-02T14:12:08,702 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/.tmp/table/d8d80deb89054b9c9f44095e5892b0de as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/table/d8d80deb89054b9c9f44095e5892b0de 2024-12-02T14:12:08,707 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/table/d8d80deb89054b9c9f44095e5892b0de, entries=2, sequenceid=11, filesize=5.4 K 2024-12-02T14:12:08,708 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false 2024-12-02T14:12:08,712 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-02T14:12:08,713 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:12:08,713 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:08,713 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148728593Running coprocessor pre-close hooks at 1733148728593Disabling compacts and flushes for region at 1733148728593Disabling writes for close at 1733148728593Obtaining lock to block concurrent updates at 1733148728593Preparing flush snapshotting stores in 1588230740 at 1733148728593Finished 
memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733148728593Flushing stores of hbase:meta,,1.1588230740 at 1733148728594 (+1 ms)Flushing 1588230740/info: creating writer at 1733148728594Flushing 1588230740/info: appending metadata at 1733148728610 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733148728610Flushing 1588230740/ns: creating writer at 1733148728621 (+11 ms)Flushing 1588230740/ns: appending metadata at 1733148728637 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733148728637Flushing 1588230740/table: creating writer at 1733148728651 (+14 ms)Flushing 1588230740/table: appending metadata at 1733148728669 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733148728669Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a5d162e: reopening flushed file at 1733148728690 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d55627f: reopening flushed file at 1733148728696 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@688c61db: reopening flushed file at 1733148728701 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false at 1733148728708 (+7 ms)Writing region close event to WAL at 1733148728709 (+1 ms)Running coprocessor post-close hooks at 1733148728713 (+4 ms)Closed at 1733148728713 2024-12-02T14:12:08,713 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:08,793 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,42993,1733148677171; all regions closed. 
2024-12-02T14:12:08,794 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,794 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,794 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,794 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,794 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741834_1010 (size=3306) 2024-12-02T14:12:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741834_1010 (size=3306) 2024-12-02T14:12:08,799 DEBUG [RS:0;a3a61c9ba14f:42993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/oldWALs 2024-12-02T14:12:08,799 INFO [RS:0;a3a61c9ba14f:42993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C42993%2C1733148677171.meta:.meta(num 1733148678067) 2024-12-02T14:12:08,799 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,799 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,800 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,800 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,800 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:08,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741844_1020 (size=1252) 2024-12-02T14:12:08,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741844_1020 (size=1252) 2024-12-02T14:12:08,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:08,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:08,991 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/WALs/a3a61c9ba14f,42993,1733148677171/a3a61c9ba14f%2C42993%2C1733148677171.1733148718562 to hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/oldWALs/a3a61c9ba14f%2C42993%2C1733148677171.1733148718562 2024-12-02T14:12:08,997 DEBUG [RS:0;a3a61c9ba14f:42993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/oldWALs 2024-12-02T14:12:08,997 INFO [RS:0;a3a61c9ba14f:42993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C42993%2C1733148677171:(num 1733148728580) 2024-12-02T14:12:08,997 DEBUG [RS:0;a3a61c9ba14f:42993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:08,997 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:12:08,997 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:12:08,998 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T14:12:08,998 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:12:08,998 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:12:08,999 INFO [RS:0;a3a61c9ba14f:42993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42993 2024-12-02T14:12:09,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,42993,1733148677171 2024-12-02T14:12:09,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:12:09,001 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:12:09,001 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,42993,1733148677171] 2024-12-02T14:12:09,002 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,42993,1733148677171 already deleted, retry=false 2024-12-02T14:12:09,002 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,42993,1733148677171 expired; onlineServers=0 2024-12-02T14:12:09,002 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a3a61c9ba14f,43047,1733148677063' ***** 2024-12-02T14:12:09,002 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T14:12:09,002 INFO [M:0;a3a61c9ba14f:43047 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:12:09,002 INFO [M:0;a3a61c9ba14f:43047 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 
2024-12-02T14:12:09,002 DEBUG [M:0;a3a61c9ba14f:43047 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T14:12:09,003 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-02T14:12:09,003 DEBUG [M:0;a3a61c9ba14f:43047 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:12:09,003 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148677429 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148677429,5,FailOnTimeoutGroup] 2024-12-02T14:12:09,003 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148677433 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148677433,5,FailOnTimeoutGroup] 2024-12-02T14:12:09,003 INFO [M:0;a3a61c9ba14f:43047 {}] hbase.ChoreService(370): Chore service for: master/a3a61c9ba14f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:12:09,003 INFO [M:0;a3a61c9ba14f:43047 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:12:09,003 DEBUG [M:0;a3a61c9ba14f:43047 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:12:09,003 INFO [M:0;a3a61c9ba14f:43047 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:12:09,003 INFO [M:0;a3a61c9ba14f:43047 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:12:09,003 INFO [M:0;a3a61c9ba14f:43047 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:12:09,003 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-02T14:12:09,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:12:09,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:09,004 DEBUG [M:0;a3a61c9ba14f:43047 {}] zookeeper.ZKUtil(347): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:12:09,004 WARN [M:0;a3a61c9ba14f:43047 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:12:09,005 INFO [M:0;a3a61c9ba14f:43047 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/.lastflushedseqids 2024-12-02T14:12:09,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741849_1025 (size=130) 2024-12-02T14:12:09,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741849_1025 (size=130) 2024-12-02T14:12:09,018 INFO [M:0;a3a61c9ba14f:43047 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:12:09,018 INFO [M:0;a3a61c9ba14f:43047 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:12:09,018 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:12:09,018 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:09,018 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:09,018 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:12:09,018 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T14:12:09,018 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.94 KB 2024-12-02T14:12:09,035 DEBUG [M:0;a3a61c9ba14f:43047 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c187f1efd105490a9b907ae272b78ecd is 82, key is hbase:meta,,1/info:regioninfo/1733148678114/Put/seqid=0 2024-12-02T14:12:09,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741850_1026 (size=5672) 2024-12-02T14:12:09,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741850_1026 (size=5672) 2024-12-02T14:12:09,045 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c187f1efd105490a9b907ae272b78ecd 2024-12-02T14:12:09,062 DEBUG [M:0;a3a61c9ba14f:43047 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6ae8c900de5f420b81aa9545a2323ddb is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733148679030/Put/seqid=0 2024-12-02T14:12:09,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741851_1027 (size=7819) 2024-12-02T14:12:09,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741851_1027 (size=7819) 2024-12-02T14:12:09,067 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6ae8c900de5f420b81aa9545a2323ddb 2024-12-02T14:12:09,072 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6ae8c900de5f420b81aa9545a2323ddb 2024-12-02T14:12:09,086 DEBUG [M:0;a3a61c9ba14f:43047 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb96aea3130341c0af3af14450acd6bf is 69, key is a3a61c9ba14f,42993,1733148677171/rs:state/1733148677532/Put/seqid=0 2024-12-02T14:12:09,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741852_1028 (size=5156) 2024-12-02T14:12:09,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741852_1028 (size=5156) 2024-12-02T14:12:09,097 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), 
to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb96aea3130341c0af3af14450acd6bf 2024-12-02T14:12:09,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:09,102 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42993-0x1009b4570130001, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:09,103 INFO [RS:0;a3a61c9ba14f:42993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:12:09,103 INFO [RS:0;a3a61c9ba14f:42993 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,42993,1733148677171; zookeeper connection closed. 2024-12-02T14:12:09,103 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d813ed4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d813ed4 2024-12-02T14:12:09,103 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T14:12:09,126 DEBUG [M:0;a3a61c9ba14f:43047 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7f938f1063a4eb29680be5da7d87647 is 52, key is load_balancer_on/state:d/1733148678221/Put/seqid=0 2024-12-02T14:12:09,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741853_1029 (size=5056) 2024-12-02T14:12:09,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741853_1029 (size=5056) 2024-12-02T14:12:09,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:09,531 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7f938f1063a4eb29680be5da7d87647 2024-12-02T14:12:09,540 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c187f1efd105490a9b907ae272b78ecd as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c187f1efd105490a9b907ae272b78ecd 2024-12-02T14:12:09,547 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c187f1efd105490a9b907ae272b78ecd, entries=8, sequenceid=121, filesize=5.5 K 2024-12-02T14:12:09,550 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6ae8c900de5f420b81aa9545a2323ddb as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6ae8c900de5f420b81aa9545a2323ddb 2024-12-02T14:12:09,555 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6ae8c900de5f420b81aa9545a2323ddb 2024-12-02T14:12:09,555 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6ae8c900de5f420b81aa9545a2323ddb, entries=14, sequenceid=121, filesize=7.6 K 2024-12-02T14:12:09,556 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/eb96aea3130341c0af3af14450acd6bf as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb96aea3130341c0af3af14450acd6bf 2024-12-02T14:12:09,563 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/eb96aea3130341c0af3af14450acd6bf, entries=1, sequenceid=121, filesize=5.0 K 2024-12-02T14:12:09,564 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c7f938f1063a4eb29680be5da7d87647 as hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7f938f1063a4eb29680be5da7d87647 2024-12-02T14:12:09,568 INFO [regionserver/a3a61c9ba14f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:12:09,571 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45363/user/jenkins/test-data/4bbd590e-40a2-91bb-f949-9fee625e60a1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c7f938f1063a4eb29680be5da7d87647, entries=1, sequenceid=121, filesize=4.9 K 2024-12-02T14:12:09,572 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=121, compaction requested=false 2024-12-02T14:12:09,577 INFO [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:09,577 DEBUG [M:0;a3a61c9ba14f:43047 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148729018Disabling compacts and flushes for region at 1733148729018Disabling writes for close at 1733148729018Obtaining lock to block concurrent updates at 1733148729018Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733148729018Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44599, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1733148729019 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733148729019Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733148729019Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733148729035 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733148729035Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733148729050 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733148729062 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733148729062Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733148729072 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733148729086 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733148729086Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733148729106 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733148729125 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733148729125Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76e519f5: reopening flushed file at 1733148729537 (+412 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d2f3b57: reopening flushed file at 1733148729547 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6da32e5c: reopening flushed file at 1733148729555 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f73646b: reopening flushed file at 1733148729563 (+8 ms)Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=121, compaction requested=false at 1733148729572 (+9 ms)Writing region close event to WAL at 1733148729577 (+5 ms)Closed at 1733148729577 2024-12-02T14:12:09,581 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:09,581 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:09,581 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:09,581 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:09,581 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:09,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32821 is added to blk_1073741830_1006 (size=52996) 2024-12-02T14:12:09,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34479 is added to blk_1073741830_1006 (size=52996) 2024-12-02T14:12:09,585 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T14:12:09,585 INFO [M:0;a3a61c9ba14f:43047 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-02T14:12:09,585 INFO [M:0;a3a61c9ba14f:43047 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43047 2024-12-02T14:12:09,586 INFO [M:0;a3a61c9ba14f:43047 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:12:09,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:09,688 INFO [M:0;a3a61c9ba14f:43047 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:12:09,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43047-0x1009b4570130000, quorum=127.0.0.1:56298, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:09,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b8edabe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:09,691 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3838d9cb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:09,691 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:09,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@739c2ff2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:09,691 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fc981fd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:09,693 WARN [BP-774725059-172.17.0.2-1733148676379 heartbeating to localhost/127.0.0.1:45363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:12:09,693 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:12:09,693 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:12:09,693 WARN [BP-774725059-172.17.0.2-1733148676379 heartbeating to localhost/127.0.0.1:45363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-774725059-172.17.0.2-1733148676379 (Datanode Uuid 0dba603a-0b0e-4ee3-b60f-168b8987b941) service to localhost/127.0.0.1:45363 2024-12-02T14:12:09,693 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data3/current/BP-774725059-172.17.0.2-1733148676379 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:09,694 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data4/current/BP-774725059-172.17.0.2-1733148676379 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:09,694 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:12:09,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ac76b28{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:09,704 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@371e2711{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:09,704 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:09,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45ae7776{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:09,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48bfafbe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:09,706 WARN [BP-774725059-172.17.0.2-1733148676379 heartbeating to localhost/127.0.0.1:45363 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:12:09,706 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:12:09,706 WARN [BP-774725059-172.17.0.2-1733148676379 heartbeating to localhost/127.0.0.1:45363 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-774725059-172.17.0.2-1733148676379 (Datanode Uuid 8d0d4d90-c7a5-4e72-9d71-e8e3230ac31c) service to localhost/127.0.0.1:45363 2024-12-02T14:12:09,706 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:12:09,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data1/current/BP-774725059-172.17.0.2-1733148676379 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:09,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/cluster_a0a71af9-d3d1-8389-4db9-571281a82091/data/data2/current/BP-774725059-172.17.0.2-1733148676379 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:09,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:12:09,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12208e1b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:12:09,713 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56e526c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:09,713 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:09,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@521d1c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:09,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@252e2abb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:09,718 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T14:12:09,735 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T14:12:09,742 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 180) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45363 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45363 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45363 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45363 from jenkins.hfs.5 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45363 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:45363 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=486 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=132 (was 96) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5777 (was 6249) 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=486, MaxFileDescriptor=1048576, SystemLoadAverage=132, ProcessCount=11, AvailableMemoryMB=5777 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.log.dir so I do NOT create it in target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/49528bd8-f368-c99c-0298-078e8c125494/hadoop.tmp.dir so I do NOT create it in target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84, deleteOnExit=true 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/test.cache.data in system properties and HBase conf 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir in system properties and HBase conf 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T14:12:09,749 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T14:12:09,750 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/nfs.dump.dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/java.io.tmpdir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T14:12:09,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T14:12:09,760 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:12:09,805 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:09,808 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:12:09,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:12:09,809 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:12:09,809 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:12:09,809 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:09,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78d2a49d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:12:09,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11e6950{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:12:09,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69826858{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/java.io.tmpdir/jetty-localhost-35087-hadoop-hdfs-3_4_1-tests_jar-_-any-18360061902433568769/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:12:09,899 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7faa894d{HTTP/1.1, (http/1.1)}{localhost:35087} 2024-12-02T14:12:09,899 INFO [Time-limited test {}] server.Server(415): Started @239562ms 2024-12-02T14:12:09,909 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:12:09,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:09,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:09,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:09,992 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:12:09,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:12:09,996 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:12:09,996 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T14:12:09,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@459363d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:12:09,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cb7e6ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:12:10,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ce0132a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/java.io.tmpdir/jetty-localhost-38559-hadoop-hdfs-3_4_1-tests_jar-_-any-14469531896908340603/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:10,098 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f19e3c1{HTTP/1.1, (http/1.1)}{localhost:38559} 2024-12-02T14:12:10,098 INFO [Time-limited test {}] server.Server(415): Started @239760ms 2024-12-02T14:12:10,099 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:12:10,168 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:10,170 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:12:10,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:12:10,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:12:10,171 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:12:10,171 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51585bde{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:12:10,172 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43909889{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:12:10,188 WARN [Thread-1956 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data2/current/BP-1782339526-172.17.0.2-1733148729772/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:10,188 WARN [Thread-1955 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data1/current/BP-1782339526-172.17.0.2-1733148729772/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:10,211 WARN [Thread-1934 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:12:10,214 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf5b320b055ab684 with lease ID 0x48e88246ba90663d: Processing first storage report for DS-b3222f79-998a-481b-9e58-4884dcb6c399 from datanode DatanodeRegistration(127.0.0.1:33913, datanodeUuid=36a111aa-de33-41b0-9b6d-79fb485abcca, infoPort=42483, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772) 2024-12-02T14:12:10,214 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf5b320b055ab684 with lease ID 0x48e88246ba90663d: from storage DS-b3222f79-998a-481b-9e58-4884dcb6c399 node DatanodeRegistration(127.0.0.1:33913, datanodeUuid=36a111aa-de33-41b0-9b6d-79fb485abcca, infoPort=42483, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:12:10,214 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf5b320b055ab684 with lease ID 0x48e88246ba90663d: Processing first storage report for DS-c0886752-3268-409a-b68e-7679e122033e from datanode DatanodeRegistration(127.0.0.1:33913, datanodeUuid=36a111aa-de33-41b0-9b6d-79fb485abcca, infoPort=42483, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772) 2024-12-02T14:12:10,214 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf5b320b055ab684 with lease ID 0x48e88246ba90663d: from storage DS-c0886752-3268-409a-b68e-7679e122033e node DatanodeRegistration(127.0.0.1:33913, datanodeUuid=36a111aa-de33-41b0-9b6d-79fb485abcca, infoPort=42483, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T14:12:10,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:10,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d31ee43{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/java.io.tmpdir/jetty-localhost-44351-hadoop-hdfs-3_4_1-tests_jar-_-any-8722238760357055719/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:10,267 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4676912c{HTTP/1.1, (http/1.1)}{localhost:44351} 2024-12-02T14:12:10,267 INFO [Time-limited test {}] server.Server(415): Started @239930ms 2024-12-02T14:12:10,268 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:12:10,325 WARN [Thread-1981 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data3/current/BP-1782339526-172.17.0.2-1733148729772/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:10,325 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data4/current/BP-1782339526-172.17.0.2-1733148729772/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:10,341 WARN [Thread-1970 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T14:12:10,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x85a7f585b5f2356b with lease ID 0x48e88246ba90663e: Processing first storage report for DS-3e0352b6-fe5b-46fd-967d-27ff0aaa53c6 from datanode DatanodeRegistration(127.0.0.1:37799, datanodeUuid=e03d48c0-2b2e-46d0-8ed2-f81fd33b3399, infoPort=42231, infoSecurePort=0, ipcPort=44151, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772) 2024-12-02T14:12:10,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x85a7f585b5f2356b with lease ID 0x48e88246ba90663e: from storage DS-3e0352b6-fe5b-46fd-967d-27ff0aaa53c6 node DatanodeRegistration(127.0.0.1:37799, datanodeUuid=e03d48c0-2b2e-46d0-8ed2-f81fd33b3399, infoPort=42231, infoSecurePort=0, ipcPort=44151, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:12:10,347 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x85a7f585b5f2356b with lease ID 0x48e88246ba90663e: Processing first storage report for DS-d3c0ef4a-f60f-4c12-a339-a4506333c30a from datanode DatanodeRegistration(127.0.0.1:37799, datanodeUuid=e03d48c0-2b2e-46d0-8ed2-f81fd33b3399, infoPort=42231, infoSecurePort=0, ipcPort=44151, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772) 2024-12-02T14:12:10,347 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x85a7f585b5f2356b with lease ID 0x48e88246ba90663e: from storage DS-d3c0ef4a-f60f-4c12-a339-a4506333c30a node DatanodeRegistration(127.0.0.1:37799, datanodeUuid=e03d48c0-2b2e-46d0-8ed2-f81fd33b3399, infoPort=42231, infoSecurePort=0, ipcPort=44151, storageInfo=lv=-57;cid=testClusterID;nsid=1866510661;c=1733148729772), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:12:10,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3 2024-12-02T14:12:10,390 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/zookeeper_0, clientPort=55059, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T14:12:10,391 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55059 2024-12-02T14:12:10,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:10,392 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:10,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:12:10,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:12:10,403 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f with version=8 2024-12-02T14:12:10,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase-staging 2024-12-02T14:12:10,404 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:12:10,405 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46157 2024-12-02T14:12:10,407 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46157 connecting to ZooKeeper ensemble=127.0.0.1:55059 2024-12-02T14:12:10,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461570x0, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:12:10,410 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46157-0x1009b4640840000 connected 2024-12-02T14:12:10,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:10,427 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:10,430 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:12:10,430 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f, hbase.cluster.distributed=false 2024-12-02T14:12:10,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:12:10,432 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46157 2024-12-02T14:12:10,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46157 2024-12-02T14:12:10,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46157 2024-12-02T14:12:10,433 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46157 2024-12-02T14:12:10,434 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46157 2024-12-02T14:12:10,446 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:12:10,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:10,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:10,446 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:12:10,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:10,446 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:12:10,446 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:12:10,446 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:12:10,447 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43655 2024-12-02T14:12:10,448 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43655 connecting to ZooKeeper ensemble=127.0.0.1:55059 2024-12-02T14:12:10,449 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:10,450 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:10,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436550x0, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:12:10,454 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43655-0x1009b4640840001 connected 2024-12-02T14:12:10,454 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:12:10,455 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:12:10,457 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:12:10,458 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:12:10,458 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:12:10,461 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43655 2024-12-02T14:12:10,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43655 2024-12-02T14:12:10,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43655 2024-12-02T14:12:10,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43655 2024-12-02T14:12:10,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43655 2024-12-02T14:12:10,478 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a3a61c9ba14f:46157 2024-12-02T14:12:10,478 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:10,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:10,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:10,480 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:10,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:12:10,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,481 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:12:10,482 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a3a61c9ba14f,46157,1733148730404 from backup master directory 2024-12-02T14:12:10,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:10,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:10,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:10,483 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
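
The ZKUtil lines above show watchers being registered on znodes such as /hbase/running and /hbase/acl before those nodes exist. A minimal sketch of that idea using the plain ZooKeeper client (this is not HBase's ZKUtil itself; the quorum string and path are simply the values visible in the log):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZNodeWatchSketch {
        public static void main(String[] args) throws Exception {
            // Connect to the same ensemble the log shows and print every event we receive.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:55059", 30000,
                (WatchedEvent e) -> System.out.println("event: " + e.getType() + " on " + e.getPath()));
            // exists() registers the watch even when the node is absent, so a later
            // NodeCreated event for /hbase/running is still delivered to the watcher.
            Stat stat = zk.exists("/hbase/running", true);
            System.out.println("/hbase/running currently " + (stat == null ? "absent" : "present"));
        }
    }
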
2024-12-02T14:12:10,483 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:10,487 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/hbase.id] with ID: d7a91fe6-12ef-49fa-bc8d-6ef8d1fc64d3 2024-12-02T14:12:10,487 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/.tmp/hbase.id 2024-12-02T14:12:10,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:12:10,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:12:10,496 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/.tmp/hbase.id]:[hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/hbase.id] 2024-12-02T14:12:10,507 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:10,507 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T14:12:10,508 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
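
The cluster ID lines show a write-then-rename pattern: hbase.id is written under .tmp first and only then moved to its final name, so readers never observe a partially written file. A hedged sketch of the same pattern with the Hadoop FileSystem API (paths are shortened for readability; only the namenode address and the UUID are taken from the log):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37165"), new Configuration());
            Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // temporary location (illustrative path)
            Path dst = new Path("/user/jenkins/test-data/hbase.id");      // final location (illustrative path)
            try (FSDataOutputStream out = fs.create(tmp, true)) {
                out.writeUTF("d7a91fe6-12ef-49fa-bc8d-6ef8d1fc64d3");     // cluster ID from the log
            }
            // Move the finished file into place; readers only ever see the complete file.
            if (!fs.rename(tmp, dst)) {
                throw new java.io.IOException("rename of cluster ID file failed");
            }
        }
    }
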
2024-12-02T14:12:10,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:12:10,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:12:10,520 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:12:10,520 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T14:12:10,521 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:12:10,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:12:10,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:12:10,528 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store 2024-12-02T14:12:10,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:12:10,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:12:10,534 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:10,534 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:12:10,534 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:10,534 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:10,534 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:12:10,534 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:10,534 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
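
The 'master:store' descriptor dumped above lists four column families (info, proc, rs, state) with per-family settings such as ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks for info. Purely to make those logged attributes concrete, here is a rough sketch of how such a descriptor could be declared with the public HBase client builder API; the master constructs this region internally, so the snippet is illustrative only:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
        static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
                // 'info' family: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build())
                // 'proc', 'rs' and 'state' use the defaults shown in the log (1 version, 64 KB blocks)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc")).build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs")).build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state")).build())
                .build();
        }
    }
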
2024-12-02T14:12:10,534 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148730534Disabling compacts and flushes for region at 1733148730534Disabling writes for close at 1733148730534Writing region close event to WAL at 1733148730534Closed at 1733148730534 2024-12-02T14:12:10,535 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/.initializing 2024-12-02T14:12:10,535 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/WALs/a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:10,537 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C46157%2C1733148730404, suffix=, logDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/WALs/a3a61c9ba14f,46157,1733148730404, archiveDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/oldWALs, maxLogs=10 2024-12-02T14:12:10,538 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C46157%2C1733148730404.1733148730537 2024-12-02T14:12:10,544 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/WALs/a3a61c9ba14f,46157,1733148730404/a3a61c9ba14f%2C46157%2C1733148730404.1733148730537 2024-12-02T14:12:10,545 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:42483:42483)] 2024-12-02T14:12:10,549 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:12:10,549 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:10,549 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,549 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,551 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T14:12:10,551 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:10,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T14:12:10,553 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:10,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:12:10,554 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,555 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:10,555 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:12:10,556 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:10,557 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,557 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,558 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,559 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,559 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,559 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:12:10,560 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:10,562 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:12:10,562 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809579, jitterRate=0.0294332355260849}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:12:10,563 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733148730549Initializing all the Stores at 1733148730550 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148730550Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148730550Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148730550Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148730550Cleaning up temporary data from old regions at 1733148730559 (+9 ms)Region opened successfully at 1733148730563 (+4 ms) 2024-12-02T14:12:10,563 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:12:10,566 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4706fb21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:12:10,567 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T14:12:10,567 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:12:10,567 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:12:10,567 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:12:10,568 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T14:12:10,568 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T14:12:10,568 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:12:10,571 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:12:10,571 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:12:10,572 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:12:10,572 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:12:10,573 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:12:10,574 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:12:10,574 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:12:10,575 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:12:10,577 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:12:10,578 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T14:12:10,579 DEBUG 
[master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:12:10,581 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:12:10,582 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:12:10,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:10,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:10,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,585 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a3a61c9ba14f,46157,1733148730404, sessionid=0x1009b4640840000, setting cluster-up flag (Was=false) 2024-12-02T14:12:10,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,590 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:12:10,591 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:10,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,594 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:10,600 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T14:12:10,608 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:10,610 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:12:10,612 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:10,612 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:12:10,612 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T14:12:10,613 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a3a61c9ba14f,46157,1733148730404 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a3a61c9ba14f:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:12:10,615 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T14:12:10,622 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733148760622 2024-12-02T14:12:10,622 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:12:10,622 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:12:10,622 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:12:10,622 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:12:10,622 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:12:10,622 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:12:10,623 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,623 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:10,623 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:12:10,624 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,625 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:12:10,625 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:12:10,625 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:12:10,625 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:12:10,629 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:12:10,629 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:12:10,629 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148730629,5,FailOnTimeoutGroup] 2024-12-02T14:12:10,630 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148730630,5,FailOnTimeoutGroup] 2024-12-02T14:12:10,630 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,630 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:12:10,630 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,630 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
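
LogsCleaner, HFileCleaner, SnapshotCleaner and ReplicationBarrierCleaner are all registered through the ChoreService/ScheduledChore mechanism named in these lines: a named task that the service runs on a fixed period. A bare-bones sketch of that mechanism under those assumptions, with a made-up chore name and period:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            // Minimal Stoppable so the chore can be cancelled cooperatively.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ChoreService service = new ChoreService("sketch");
            // Run every 600000 ms, mirroring the period the log reports for LogsCleaner.
            service.scheduleChore(new ScheduledChore("ExampleCleaner", stopper, 600_000) {
                @Override protected void chore() {
                    // periodic cleanup work would go here
                    System.out.println("chore tick");
                }
            });
        }
    }
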
2024-12-02T14:12:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:12:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:12:10,638 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:12:10,638 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f 2024-12-02T14:12:10,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:12:10,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:12:10,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:10,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:12:10,650 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:12:10,651 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:10,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:12:10,673 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:12:10,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,674 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:10,674 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:12:10,675 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:12:10,675 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,676 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:10,676 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:12:10,677 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:12:10,677 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:10,677 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:10,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:12:10,678 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(746): ClusterId : d7a91fe6-12ef-49fa-bc8d-6ef8d1fc64d3 2024-12-02T14:12:10,678 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:12:10,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740 2024-12-02T14:12:10,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740 2024-12-02T14:12:10,679 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:12:10,679 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:12:10,680 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:12:10,680 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:12:10,680 DEBUG 
[PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:12:10,681 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:12:10,681 DEBUG [RS:0;a3a61c9ba14f:43655 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1da52e98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:12:10,682 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:12:10,684 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:12:10,684 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817688, jitterRate=0.03974528610706329}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:12:10,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733148730648Initializing all the Stores at 1733148730649 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148730649Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148730649Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148730649Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148730649Cleaning up temporary data from old regions at 1733148730680 (+31 ms)Region opened successfully at 1733148730685 (+5 ms) 2024-12-02T14:12:10,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:12:10,685 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:12:10,685 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:12:10,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:12:10,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:12:10,686 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:10,686 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148730685Disabling compacts and flushes for region at 1733148730685Disabling writes for close at 1733148730685Writing region close event to WAL at 1733148730686 (+1 ms)Closed at 1733148730686 2024-12-02T14:12:10,687 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:10,687 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:12:10,687 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:12:10,689 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:12:10,690 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:12:10,693 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a3a61c9ba14f:43655 2024-12-02T14:12:10,693 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:12:10,693 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:12:10,693 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(832): About to register with Master. 
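
For reference, the FlushLargeStoresPolicy message above falls back to memstore-flush-size divided by the number of families because no per-column-family lower bound is present in the hbase:meta table descriptor. Below is a minimal illustrative sketch (not code from this test) of how such a value could be placed in a table descriptor; the property key is taken verbatim from the log line, and whether this HBase build reads it from the descriptor in exactly this way is an assumption. The table name is hypothetical.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundExample {
      // Builds a descriptor carrying an explicit per-family flush lower bound
      // (32 MB here) instead of the memStoreFlushSize / #families fallback
      // reported in the log above. Sketch only, under the assumptions stated.
      public static TableDescriptor withFlushLowerBound() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))            // hypothetical name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(32L * 1024 * 1024))
            .build();
      }
    }
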
2024-12-02T14:12:10,694 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,46157,1733148730404 with port=43655, startcode=1733148730445 2024-12-02T14:12:10,694 DEBUG [RS:0;a3a61c9ba14f:43655 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:12:10,695 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57953, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:12:10,696 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46157 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:10,696 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46157 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:10,697 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f 2024-12-02T14:12:10,697 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37165 2024-12-02T14:12:10,697 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:12:10,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:12:10,699 DEBUG [RS:0;a3a61c9ba14f:43655 {}] zookeeper.ZKUtil(111): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:10,699 WARN [RS:0;a3a61c9ba14f:43655 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:12:10,699 INFO [RS:0;a3a61c9ba14f:43655 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:12:10,699 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:10,700 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,43655,1733148730445] 2024-12-02T14:12:10,703 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:12:10,705 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:12:10,705 INFO [RS:0;a3a61c9ba14f:43655 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:12:10,705 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T14:12:10,705 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:12:10,706 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:12:10,706 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,706 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,706 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,706 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,706 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,706 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:12:10,707 DEBUG [RS:0;a3a61c9ba14f:43655 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:12:10,709 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
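
The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above come from HBase's ChoreService scheduling periodic tasks such as CompactionChecker (period 1000 ms). As a hedged sketch of that pattern only, and not the region server's actual wiring, a chore can be scheduled like this; the chore and prefix names below are made up for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreExample {
      public static void main(String[] args) {
        // Simple stop flag the chore framework consults between runs.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");  // thread-name prefix (hypothetical)
        // Runs every 1000 ms, mirroring the CompactionChecker period in the log.
        service.scheduleChore(new ScheduledChore("ExampleChecker", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("periodic check");
          }
        });
      }
    }
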
2024-12-02T14:12:10,709 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,709 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,709 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,709 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,709 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43655,1733148730445-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:12:10,723 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:12:10,723 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,43655,1733148730445-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,723 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,723 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.Replication(171): a3a61c9ba14f,43655,1733148730445 started 2024-12-02T14:12:10,736 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:10,736 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,43655,1733148730445, RpcServer on a3a61c9ba14f/172.17.0.2:43655, sessionid=0x1009b4640840001 2024-12-02T14:12:10,736 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:12:10,736 DEBUG [RS:0;a3a61c9ba14f:43655 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:10,736 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,43655,1733148730445' 2024-12-02T14:12:10,736 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:12:10,737 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:12:10,737 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:12:10,737 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:12:10,737 DEBUG [RS:0;a3a61c9ba14f:43655 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:10,737 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,43655,1733148730445' 2024-12-02T14:12:10,737 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:12:10,738 DEBUG 
[RS:0;a3a61c9ba14f:43655 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:12:10,738 DEBUG [RS:0;a3a61c9ba14f:43655 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:12:10,738 INFO [RS:0;a3a61c9ba14f:43655 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:12:10,738 INFO [RS:0;a3a61c9ba14f:43655 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:12:10,840 INFO [RS:0;a3a61c9ba14f:43655 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C43655%2C1733148730445, suffix=, logDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445, archiveDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/oldWALs, maxLogs=32 2024-12-02T14:12:10,840 WARN [a3a61c9ba14f:46157 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:12:10,841 INFO [RS:0;a3a61c9ba14f:43655 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C43655%2C1733148730445.1733148730840 2024-12-02T14:12:10,847 INFO [RS:0;a3a61c9ba14f:43655 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148730840 2024-12-02T14:12:10,848 DEBUG [RS:0;a3a61c9ba14f:43655 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42483:42483),(127.0.0.1/127.0.0.1:42231:42231)] 2024-12-02T14:12:10,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:10,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:11,091 DEBUG [a3a61c9ba14f:46157 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T14:12:11,091 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:11,092 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,43655,1733148730445, state=OPENING 2024-12-02T14:12:11,093 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:12:11,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:11,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:11,095 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:12:11,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:11,095 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:11,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,43655,1733148730445}] 2024-12-02T14:12:11,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:11,249 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:12:11,251 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49333, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:12:11,255 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:12:11,255 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:12:11,257 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C43655%2C1733148730445.meta, suffix=.meta, logDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445, archiveDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/oldWALs, maxLogs=32 2024-12-02T14:12:11,258 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C43655%2C1733148730445.meta.1733148731257.meta 2024-12-02T14:12:11,267 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.meta.1733148731257.meta 2024-12-02T14:12:11,268 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:42483:42483)] 2024-12-02T14:12:11,273 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:12:11,273 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:12:11,274 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 
service=MultiRowMutationService 2024-12-02T14:12:11,274 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T14:12:11,274 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:12:11,274 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:11,274 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:12:11,274 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:12:11,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:12:11,277 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:12:11,277 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:11,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:11,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:12:11,280 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:12:11,280 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:11,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:11,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:12:11,282 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:12:11,282 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:11,282 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:11,283 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:12:11,283 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:12:11,283 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:11,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:11,284 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:12:11,285 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740 2024-12-02T14:12:11,286 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740 2024-12-02T14:12:11,287 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:12:11,287 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:12:11,288 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:12:11,289 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:12:11,290 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708169, jitterRate=-0.09951670467853546}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:12:11,290 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:12:11,290 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733148731274Writing region info on filesystem at 1733148731274Initializing all the Stores at 1733148731275 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148731275Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1733148731276 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148731276Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148731276Cleaning up temporary data from old regions at 1733148731287 (+11 ms)Running coprocessor post-open hooks at 1733148731290 (+3 ms)Region opened successfully at 1733148731290 2024-12-02T14:12:11,292 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733148731248 2024-12-02T14:12:11,294 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:12:11,294 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:12:11,296 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:11,297 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,43655,1733148730445, state=OPEN 2024-12-02T14:12:11,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:12:11,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:12:11,299 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:11,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:11,299 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:11,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:12:11,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,43655,1733148730445 in 204 msec 2024-12-02T14:12:11,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=2, resume processing ppid=1 2024-12-02T14:12:11,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 615 msec 2024-12-02T14:12:11,306 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:11,306 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:12:11,309 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:12:11,309 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,43655,1733148730445, seqNum=-1] 2024-12-02T14:12:11,310 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:12:11,311 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42451, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:12:11,316 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 705 msec 2024-12-02T14:12:11,317 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733148731316, completionTime=-1 2024-12-02T14:12:11,317 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T14:12:11,317 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733148791319 2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733148851319 2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46157,1733148730404-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46157,1733148730404-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46157,1733148730404-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:11,319 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a3a61c9ba14f:46157, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:11,320 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:11,320 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:11,321 DEBUG [master/a3a61c9ba14f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.841sec 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46157,1733148730404-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:12:11,324 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46157,1733148730404-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T14:12:11,326 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T14:12:11,327 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T14:12:11,327 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46157,1733148730404-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:12:11,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f81fda7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:12:11,379 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a3a61c9ba14f,46157,-1 for getting cluster id 2024-12-02T14:12:11,379 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:12:11,380 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd7a91fe6-12ef-49fa-bc8d-6ef8d1fc64d3' 2024-12-02T14:12:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:12:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d7a91fe6-12ef-49fa-bc8d-6ef8d1fc64d3" 2024-12-02T14:12:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12396787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:12:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a3a61c9ba14f,46157,-1] 2024-12-02T14:12:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:12:11,382 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:11,383 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52082, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:12:11,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3323ea67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:12:11,384 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:12:11,385 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,43655,1733148730445, seqNum=-1] 2024-12-02T14:12:11,385 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:12:11,386 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35960, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:12:11,387 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:11,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:11,389 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T14:12:11,390 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T14:12:11,390 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:11,390 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@737f50c5 2024-12-02T14:12:11,391 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T14:12:11,392 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52086, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T14:12:11,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46157 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-02T14:12:11,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46157 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-02T14:12:11,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46157 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:12:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46157 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-02T14:12:11,395 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T14:12:11,395 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:11,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46157 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-02T14:12:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:12:11,396 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T14:12:11,402 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741835_1011 (size=381) 2024-12-02T14:12:11,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741835_1011 (size=381) 2024-12-02T14:12:11,405 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, NAME => 'TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f 2024-12-02T14:12:11,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741836_1012 (size=64) 2024-12-02T14:12:11,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741836_1012 (size=64) 2024-12-02T14:12:11,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:11,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, disabling compactions & flushes 2024-12-02T14:12:11,411 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:11,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:11,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. after waiting 0 ms 2024-12-02T14:12:11,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:11,411 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 
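
The TableDescriptorChecker warnings above show that this table was created with deliberately small MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values, which is what forces frequent flushes and splits in a log-rolling test. As an illustrative sketch only (not the test's own code), a client could request those same attributes through the Admin API like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateSmallRegionsTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .build())
              .setMaxFileSize(786432L)       // value reported in the warning above
              .setMemStoreFlushSize(8192L)   // value reported in the warning above
              .build());
        }
      }
    }
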
2024-12-02T14:12:11,411 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: Waiting for close lock at 1733148731411Disabling compacts and flushes for region at 1733148731411Disabling writes for close at 1733148731411Writing region close event to WAL at 1733148731411Closed at 1733148731411 2024-12-02T14:12:11,412 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T14:12:11,413 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733148731412"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733148731412"}]},"ts":"1733148731412"} 2024-12-02T14:12:11,415 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-02T14:12:11,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T14:12:11,416 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148731416"}]},"ts":"1733148731416"} 2024-12-02T14:12:11,417 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-02T14:12:11,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, ASSIGN}] 2024-12-02T14:12:11,419 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, ASSIGN 2024-12-02T14:12:11,420 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, ASSIGN; state=OFFLINE, location=a3a61c9ba14f,43655,1733148730445; forceNewPlan=false, retain=false 2024-12-02T14:12:11,571 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, regionState=OPENING, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:11,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, ASSIGN because future has completed 2024-12-02T14:12:11,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, 
server=a3a61c9ba14f,43655,1733148730445}] 2024-12-02T14:12:11,736 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:11,736 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, NAME => 'TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:12:11,737 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,737 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:11,737 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,737 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,738 INFO [StoreOpener-52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,739 INFO [StoreOpener-52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd columnFamilyName info 2024-12-02T14:12:11,740 DEBUG [StoreOpener-52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:11,740 INFO [StoreOpener-52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-1 {}] regionserver.HStore(327): Store=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:11,740 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,741 DEBUG 
[RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,741 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,742 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,742 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,744 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,746 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:12:11,747 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705829, jitterRate=-0.10249242186546326}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:12:11,747 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:11,747 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: Running coprocessor pre-open hook at 1733148731737Writing region info on filesystem at 1733148731737Initializing all the Stores at 1733148731738 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148731738Cleaning up temporary data from old regions at 1733148731742 (+4 ms)Running coprocessor post-open hooks at 1733148731747 (+5 ms)Region opened successfully at 1733148731747 2024-12-02T14:12:11,748 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., pid=6, masterSystemTime=1733148731732 2024-12-02T14:12:11,751 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:11,751 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:11,752 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:11,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, server=a3a61c9ba14f,43655,1733148730445 because future has completed 2024-12-02T14:12:11,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T14:12:11,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, server=a3a61c9ba14f,43655,1733148730445 in 177 msec 2024-12-02T14:12:11,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T14:12:11,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, ASSIGN in 341 msec 2024-12-02T14:12:11,762 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T14:12:11,762 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733148731762"}]},"ts":"1733148731762"} 2024-12-02T14:12:11,765 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-02T14:12:11,766 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T14:12:11,768 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 374 msec 2024-12-02T14:12:11,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:11,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:12,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:12,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:12,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:13,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:13,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,635 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,637 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,655 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,659 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:13,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:13,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:14,164 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:12:14,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,190 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,191 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,193 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:14,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:14,724 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-02T14:12:14,724 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-02T14:12:14,725 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-02T14:12:14,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:14,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:15,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:15,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:15,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:16,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:16,704 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T14:12:16,705 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-02T14:12:16,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:16,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:17,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:17,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:17,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:18,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:18,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:18,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:19,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:19,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:19,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:20,231 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:12:20,233 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:20,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,256 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:20,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:20,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:21,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:21,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46157 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T14:12:21,418 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-02T14:12:21,418 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-02T14:12:21,421 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-02T14:12:21,421 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 
2024-12-02T14:12:21,424 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., hostname=a3a61c9ba14f,43655,1733148730445, seqNum=2] 2024-12-02T14:12:21,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:21,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-02T14:12:21,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/46a23d36d23848189c546d49596bf08c is 1080, key is row0001/info:/1733148741426/Put/seqid=0 2024-12-02T14:12:21,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741837_1013 (size=12509) 2024-12-02T14:12:21,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/46a23d36d23848189c546d49596bf08c 2024-12-02T14:12:21,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741837_1013 (size=12509) 2024-12-02T14:12:21,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/46a23d36d23848189c546d49596bf08c as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/46a23d36d23848189c546d49596bf08c 2024-12-02T14:12:21,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/46a23d36d23848189c546d49596bf08c, entries=7, sequenceid=11, filesize=12.2 K 2024-12-02T14:12:21,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 65ms, sequenceid=11, compaction requested=false 2024-12-02T14:12:21,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:21,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:21,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-02T14:12:21,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9a1cdf4084e244599283ecfd11c2f866 is 1080, key is row0008/info:/1733148741444/Put/seqid=0 2024-12-02T14:12:21,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741838_1014 (size=23299) 2024-12-02T14:12:21,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741838_1014 (size=23299) 2024-12-02T14:12:21,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9a1cdf4084e244599283ecfd11c2f866 2024-12-02T14:12:21,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9a1cdf4084e244599283ecfd11c2f866 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866 2024-12-02T14:12:21,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866, entries=17, sequenceid=31, filesize=22.8 K 2024-12-02T14:12:21,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=8.41 KB/8608 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 34ms, sequenceid=31, compaction requested=false 2024-12-02T14:12:21,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:21,545 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=35.0 K, sizeToCheck=16.0 K 2024-12-02T14:12:21,545 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:21,545 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866 because midkey is the same as first or last row 2024-12-02T14:12:21,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:21,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:22,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:22,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:22,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:23,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:23,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:23,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-02T14:12:23,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9c9f81a8704449729f1846578781d47c is 1080, key is row0025/info:/1733148741512/Put/seqid=0 2024-12-02T14:12:23,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741839_1015 (size=14663) 2024-12-02T14:12:23,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741839_1015 (size=14663) 2024-12-02T14:12:23,549 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9c9f81a8704449729f1846578781d47c 2024-12-02T14:12:23,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9c9f81a8704449729f1846578781d47c as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9c9f81a8704449729f1846578781d47c 2024-12-02T14:12:23,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9c9f81a8704449729f1846578781d47c, entries=9, sequenceid=43, filesize=14.3 K 2024-12-02T14:12:23,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=10.51 KB/10760 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 27ms, sequenceid=43, compaction requested=true 2024-12-02T14:12:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-12-02T14:12:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866 because midkey is the same as first or last row 2024-12-02T14:12:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:info, priority=-2147483648, current under compaction 
store size is 1 2024-12-02T14:12:23,563 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:23,564 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:23,564 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info is initiating minor compaction (all files) 2024-12-02T14:12:23,564 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info in TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:23,564 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/46a23d36d23848189c546d49596bf08c, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9c9f81a8704449729f1846578781d47c] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp, totalSize=49.3 K 2024-12-02T14:12:23,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T14:12:23,565 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 46a23d36d23848189c546d49596bf08c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733148741426 2024-12-02T14:12:23,565 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9a1cdf4084e244599283ecfd11c2f866, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733148741444 2024-12-02T14:12:23,566 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9c9f81a8704449729f1846578781d47c, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733148741512 2024-12-02T14:12:23,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/070d7860816a4dcba762410386ce8128 is 1080, key is row0034/info:/1733148743538/Put/seqid=0 
2024-12-02T14:12:23,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741840_1016 (size=16817) 2024-12-02T14:12:23,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741840_1016 (size=16817) 2024-12-02T14:12:23,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/070d7860816a4dcba762410386ce8128 2024-12-02T14:12:23,584 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd#info#compaction#58 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:23,584 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/3164a0b4903a42d19696045380bdb66c is 1080, key is row0001/info:/1733148741426/Put/seqid=0 2024-12-02T14:12:23,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/070d7860816a4dcba762410386ce8128 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/070d7860816a4dcba762410386ce8128 2024-12-02T14:12:23,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/070d7860816a4dcba762410386ce8128, entries=11, sequenceid=57, filesize=16.4 K 2024-12-02T14:12:23,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=12.61 KB/12912 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 29ms, sequenceid=57, compaction requested=false 2024-12-02T14:12:23,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:23,593 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-12-02T14:12:23,593 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:23,593 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866 because midkey is the same as first or last row 2024-12-02T14:12:23,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:23,595 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-02T14:12:23,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/364ca129cb8e4761bdbd692536660af6 is 1080, key is row0045/info:/1733148743566/Put/seqid=0 2024-12-02T14:12:23,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741841_1017 (size=40670) 2024-12-02T14:12:23,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741841_1017 (size=40670) 2024-12-02T14:12:23,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741842_1018 (size=18987) 2024-12-02T14:12:23,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741842_1018 (size=18987) 2024-12-02T14:12:23,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/364ca129cb8e4761bdbd692536660af6 2024-12-02T14:12:23,620 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/3164a0b4903a42d19696045380bdb66c as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c 2024-12-02T14:12:23,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/364ca129cb8e4761bdbd692536660af6 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/364ca129cb8e4761bdbd692536660af6 2024-12-02T14:12:23,626 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info of 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd into 3164a0b4903a42d19696045380bdb66c(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T14:12:23,626 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:
2024-12-02T14:12:23,626 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., storeName=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info, priority=13, startTime=1733148743562; duration=0sec
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c because midkey is the same as first or last row
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c because midkey is the same as first or last row
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c because midkey is the same as first or last row
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T14:12:23,627 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/364ca129cb8e4761bdbd692536660af6, entries=13, sequenceid=73, filesize=18.5 K
2024-12-02T14:12:23,627 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:info
2024-12-02T14:12:23,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=7.36 KB/7532 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 33ms, sequenceid=73, compaction requested=true
2024-12-02T14:12:23,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:
2024-12-02T14:12:23,628 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K
2024-12-02T14:12:23,628 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-02T14:12:23,628 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c because midkey is the same as first or last row
2024-12-02T14:12:23,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:info, priority=-2147483648, current under compaction store size is 1
2024-12-02T14:12:23,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T14:12:23,628 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T14:12:23,629 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 76474 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T14:12:23,630 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info is initiating minor compaction (all files)
2024-12-02T14:12:23,630 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info in TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.
2024-12-02T14:12:23,630 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/070d7860816a4dcba762410386ce8128, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/364ca129cb8e4761bdbd692536660af6] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp, totalSize=74.7 K 2024-12-02T14:12:23,630 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3164a0b4903a42d19696045380bdb66c, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1733148741426 2024-12-02T14:12:23,630 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 070d7860816a4dcba762410386ce8128, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1733148743538 2024-12-02T14:12:23,631 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 364ca129cb8e4761bdbd692536660af6, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733148743566 2024-12-02T14:12:23,643 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd#info#compaction#60 average throughput is 58.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:23,643 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/b0814090836c4a72ac037245f65b3322 is 1080, key is row0001/info:/1733148741426/Put/seqid=0 2024-12-02T14:12:23,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741843_1019 (size=66689) 2024-12-02T14:12:23,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741843_1019 (size=66689) 2024-12-02T14:12:23,657 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/b0814090836c4a72ac037245f65b3322 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 2024-12-02T14:12:23,664 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info of 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd into b0814090836c4a72ac037245f65b3322(size=65.1 K), total size for store is 65.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:12:23,664 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:23,665 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., storeName=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info, priority=13, startTime=1733148743628; duration=0sec 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 because midkey is the same as first or last row 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 because midkey is the same as first or last row 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.1 K, sizeToCheck=16.0 K 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 because midkey is the same as first or last row 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:23,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:info 2024-12-02T14:12:23,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:23,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:24,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:24,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:24,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:25,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-02T14:12:25,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9160f31a90ab42acac59cb5a5292b96e is 1080, key is row0058/info:/1733148743597/Put/seqid=0 2024-12-02T14:12:25,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741844_1020 (size=13586) 2024-12-02T14:12:25,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741844_1020 (size=13586) 2024-12-02T14:12:25,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9160f31a90ab42acac59cb5a5292b96e 2024-12-02T14:12:25,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/9160f31a90ab42acac59cb5a5292b96e as 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9160f31a90ab42acac59cb5a5292b96e 2024-12-02T14:12:25,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9160f31a90ab42acac59cb5a5292b96e, entries=8, sequenceid=86, filesize=13.3 K 2024-12-02T14:12:25,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 23ms, sequenceid=86, compaction requested=false 2024-12-02T14:12:25,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:25,640 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-12-02T14:12:25,640 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:25,640 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 because midkey is the same as first or last row 2024-12-02T14:12:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T14:12:25,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/143bb38b9e8a431f998495ed93e9cf94 is 1080, key is row0066/info:/1733148745618/Put/seqid=0 2024-12-02T14:12:25,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741845_1021 (size=16817) 2024-12-02T14:12:25,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741845_1021 (size=16817) 2024-12-02T14:12:25,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/143bb38b9e8a431f998495ed93e9cf94 2024-12-02T14:12:25,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/143bb38b9e8a431f998495ed93e9cf94 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/143bb38b9e8a431f998495ed93e9cf94 
2024-12-02T14:12:25,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/143bb38b9e8a431f998495ed93e9cf94, entries=11, sequenceid=100, filesize=16.4 K 2024-12-02T14:12:25,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 22ms, sequenceid=100, compaction requested=true 2024-12-02T14:12:25,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:25,662 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.8 K, sizeToCheck=16.0 K 2024-12-02T14:12:25,662 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:25,662 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 because midkey is the same as first or last row 2024-12-02T14:12:25,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:25,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:25,663 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:25,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T14:12:25,664 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:25,664 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info is initiating minor compaction (all files) 2024-12-02T14:12:25,664 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info in TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 
2024-12-02T14:12:25,664 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9160f31a90ab42acac59cb5a5292b96e, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/143bb38b9e8a431f998495ed93e9cf94] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp, totalSize=94.8 K 2024-12-02T14:12:25,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting b0814090836c4a72ac037245f65b3322, keycount=57, bloomtype=ROW, size=65.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733148741426 2024-12-02T14:12:25,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9160f31a90ab42acac59cb5a5292b96e, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1733148743597 2024-12-02T14:12:25,665 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 143bb38b9e8a431f998495ed93e9cf94, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1733148745618 2024-12-02T14:12:25,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/f863b64279be44d9a83d1671e3b3bc04 is 1080, key is row0077/info:/1733148745641/Put/seqid=0 2024-12-02T14:12:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741846_1022 (size=16817) 2024-12-02T14:12:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741846_1022 (size=16817) 2024-12-02T14:12:25,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/f863b64279be44d9a83d1671e3b3bc04 2024-12-02T14:12:25,678 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd#info#compaction#64 average throughput is 26.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:25,680 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/ebe1e4cd545548f09647aab4b5c6e1fb is 1080, key is row0001/info:/1733148741426/Put/seqid=0 2024-12-02T14:12:25,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/f863b64279be44d9a83d1671e3b3bc04 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/f863b64279be44d9a83d1671e3b3bc04 2024-12-02T14:12:25,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741847_1023 (size=87327) 2024-12-02T14:12:25,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741847_1023 (size=87327) 2024-12-02T14:12:25,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/f863b64279be44d9a83d1671e3b3bc04, entries=11, sequenceid=114, filesize=16.4 K 2024-12-02T14:12:25,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 24ms, sequenceid=114, compaction requested=false 2024-12-02T14:12:25,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:25,688 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.2 K, sizeToCheck=16.0 K 2024-12-02T14:12:25,688 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:25,688 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 because midkey is the same as first or last row 2024-12-02T14:12:25,689 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/ebe1e4cd545548f09647aab4b5c6e1fb as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb 2024-12-02T14:12:25,695 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info of 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd into ebe1e4cd545548f09647aab4b5c6e1fb(size=85.3 K), total size for store is 101.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:12:25,695 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: 2024-12-02T14:12:25,695 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., storeName=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info, priority=13, startTime=1733148745662; duration=0sec 2024-12-02T14:12:25,695 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-12-02T14:12:25,695 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:25,695 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-12-02T14:12:25,695 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:25,695 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-12-02T14:12:25,695 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-02T14:12:25,696 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:25,696 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:25,696 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd:info 2024-12-02T14:12:25,697 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46157 {}] assignment.AssignmentManager(1363): Split request from a3a61c9ba14f,43655,1733148730445, parent={ENCODED => 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, NAME => 'TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-02T14:12:25,701 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46157 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:25,705 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46157 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, daughterA=25d15c8b388f7ecb8891f3b8d1e1918c, daughterB=a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:25,706 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure 
table=TestLogRolling-testLogRolling, parent=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, daughterA=25d15c8b388f7ecb8891f3b8d1e1918c, daughterB=a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:25,706 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, daughterA=25d15c8b388f7ecb8891f3b8d1e1918c, daughterB=a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:25,706 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, daughterA=25d15c8b388f7ecb8891f3b8d1e1918c, daughterB=a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:25,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, UNASSIGN}] 2024-12-02T14:12:25,713 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, UNASSIGN 2024-12-02T14:12:25,715 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, regionState=CLOSING, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:25,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, UNASSIGN because future has completed 2024-12-02T14:12:25,717 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T14:12:25,718 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, server=a3a61c9ba14f,43655,1733148730445}] 2024-12-02T14:12:25,876 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,876 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T14:12:25,877 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, disabling compactions & flushes 2024-12-02T14:12:25,877 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 
2024-12-02T14:12:25,877 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:25,877 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. after waiting 0 ms 2024-12-02T14:12:25,877 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:25,877 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-02T14:12:25,883 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/24948f0340a144bfb04e224a38ab523e is 1080, key is row0088/info:/1733148745664/Put/seqid=0 2024-12-02T14:12:25,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741848_1024 (size=14663) 2024-12-02T14:12:25,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741848_1024 (size=14663) 2024-12-02T14:12:25,891 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/24948f0340a144bfb04e224a38ab523e 2024-12-02T14:12:25,897 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/.tmp/info/24948f0340a144bfb04e224a38ab523e as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/24948f0340a144bfb04e224a38ab523e 2024-12-02T14:12:25,901 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/24948f0340a144bfb04e224a38ab523e, entries=9, sequenceid=127, filesize=14.3 K 2024-12-02T14:12:25,903 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 25ms, sequenceid=127, compaction requested=true 2024-12-02T14:12:25,904 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/46a23d36d23848189c546d49596bf08c, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9c9f81a8704449729f1846578781d47c, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/070d7860816a4dcba762410386ce8128, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/364ca129cb8e4761bdbd692536660af6, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9160f31a90ab42acac59cb5a5292b96e, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/143bb38b9e8a431f998495ed93e9cf94] to archive 2024-12-02T14:12:25,905 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-02T14:12:25,906 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/46a23d36d23848189c546d49596bf08c to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/46a23d36d23848189c546d49596bf08c 2024-12-02T14:12:25,908 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9a1cdf4084e244599283ecfd11c2f866 2024-12-02T14:12:25,909 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/3164a0b4903a42d19696045380bdb66c 2024-12-02T14:12:25,910 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9c9f81a8704449729f1846578781d47c to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9c9f81a8704449729f1846578781d47c 2024-12-02T14:12:25,911 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/070d7860816a4dcba762410386ce8128 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/070d7860816a4dcba762410386ce8128 2024-12-02T14:12:25,912 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 to 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/b0814090836c4a72ac037245f65b3322 2024-12-02T14:12:25,913 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/364ca129cb8e4761bdbd692536660af6 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/364ca129cb8e4761bdbd692536660af6 2024-12-02T14:12:25,914 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9160f31a90ab42acac59cb5a5292b96e to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/9160f31a90ab42acac59cb5a5292b96e 2024-12-02T14:12:25,915 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/143bb38b9e8a431f998495ed93e9cf94 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/143bb38b9e8a431f998495ed93e9cf94 2024-12-02T14:12:25,920 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-12-02T14:12:25,921 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 2024-12-02T14:12:25,921 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd: Waiting for close lock at 1733148745877Running coprocessor pre-close hooks at 1733148745877Disabling compacts and flushes for region at 1733148745877Disabling writes for close at 1733148745877Obtaining lock to block concurrent updates at 1733148745877Preparing flush snapshotting stores in 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd at 1733148745877Finished memstore snapshotting TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., syncing WAL and waiting on mvcc, flushsize=dataSize=9684, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1733148745878 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 
at 1733148745879 (+1 ms)Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info: creating writer at 1733148745879Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info: appending metadata at 1733148745883 (+4 ms)Flushing 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info: closing flushed file at 1733148745883Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d0ee63: reopening flushed file at 1733148745896 (+13 ms)Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd in 25ms, sequenceid=127, compaction requested=true at 1733148745903 (+7 ms)Writing region close event to WAL at 1733148745917 (+14 ms)Running coprocessor post-close hooks at 1733148745921 (+4 ms)Closed at 1733148745921 2024-12-02T14:12:25,923 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,924 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, regionState=CLOSED 2024-12-02T14:12:25,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, server=a3a61c9ba14f,43655,1733148730445 because future has completed 2024-12-02T14:12:25,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-02T14:12:25,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, server=a3a61c9ba14f,43655,1733148730445 in 209 msec 2024-12-02T14:12:25,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T14:12:25,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, UNASSIGN in 217 msec 2024-12-02T14:12:25,938 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:25,941 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, threads=3 2024-12-02T14:12:25,943 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/24948f0340a144bfb04e224a38ab523e for region: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,943 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb for region: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,943 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/f863b64279be44d9a83d1671e3b3bc04 for region: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,952 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/f863b64279be44d9a83d1671e3b3bc04, top=true 2024-12-02T14:12:25,952 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/24948f0340a144bfb04e224a38ab523e, top=true 2024-12-02T14:12:25,962 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04 for child: a51540fa60a0b6728b578258b2a10cee, parent: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,962 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/f863b64279be44d9a83d1671e3b3bc04 for region: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741849_1025 (size=27) 2024-12-02T14:12:25,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741849_1025 (size=27) 2024-12-02T14:12:25,964 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e for child: a51540fa60a0b6728b578258b2a10cee, parent: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,964 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/24948f0340a144bfb04e224a38ab523e for region: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741850_1026 (size=27) 2024-12-02T14:12:25,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741850_1026 (size=27) 2024-12-02T14:12:25,973 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb for region: 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:25,975 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 52f2fc79cbbe65c7cb5b4ed5f3e8c1cd Daughter A: [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd] storefiles, Daughter B: [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04] storefiles. 2024-12-02T14:12:25,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:25,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:25,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741851_1027 (size=71) 2024-12-02T14:12:25,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741851_1027 (size=71) 2024-12-02T14:12:25,988 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:25,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741852_1028 (size=71) 2024-12-02T14:12:25,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741852_1028 (size=71) 2024-12-02T14:12:26,000 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:26,010 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-02T14:12:26,012 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-02T14:12:26,015 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733148746014"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733148746014"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733148746014"}]},"ts":"1733148746014"} 2024-12-02T14:12:26,015 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733148746014"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733148746014"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733148746014"}]},"ts":"1733148746014"} 2024-12-02T14:12:26,015 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733148746014"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733148746014"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733148746014"}]},"ts":"1733148746014"} 2024-12-02T14:12:26,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25d15c8b388f7ecb8891f3b8d1e1918c, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=a51540fa60a0b6728b578258b2a10cee, ASSIGN}] 2024-12-02T14:12:26,034 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25d15c8b388f7ecb8891f3b8d1e1918c, ASSIGN 2024-12-02T14:12:26,035 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a51540fa60a0b6728b578258b2a10cee, ASSIGN 2024-12-02T14:12:26,036 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25d15c8b388f7ecb8891f3b8d1e1918c, ASSIGN; state=SPLITTING_NEW, location=a3a61c9ba14f,43655,1733148730445; forceNewPlan=false, retain=false 2024-12-02T14:12:26,036 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a51540fa60a0b6728b578258b2a10cee, ASSIGN; state=SPLITTING_NEW, location=a3a61c9ba14f,43655,1733148730445; forceNewPlan=false, retain=false 2024-12-02T14:12:26,187 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a51540fa60a0b6728b578258b2a10cee, regionState=OPENING, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:26,187 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=25d15c8b388f7ecb8891f3b8d1e1918c, regionState=OPENING, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:26,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a51540fa60a0b6728b578258b2a10cee, ASSIGN because future has completed 2024-12-02T14:12:26,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a51540fa60a0b6728b578258b2a10cee, server=a3a61c9ba14f,43655,1733148730445}] 2024-12-02T14:12:26,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25d15c8b388f7ecb8891f3b8d1e1918c, ASSIGN because future has completed 2024-12-02T14:12:26,196 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 25d15c8b388f7ecb8891f3b8d1e1918c, server=a3a61c9ba14f,43655,1733148730445}] 2024-12-02T14:12:26,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:26,349 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
2024-12-02T14:12:26,349 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => a51540fa60a0b6728b578258b2a10cee, NAME => 'TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-02T14:12:26,349 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,349 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:26,349 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,349 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,351 INFO [StoreOpener-a51540fa60a0b6728b578258b2a10cee-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,352 INFO [StoreOpener-a51540fa60a0b6728b578258b2a10cee-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a51540fa60a0b6728b578258b2a10cee columnFamilyName info 2024-12-02T14:12:26,352 DEBUG [StoreOpener-a51540fa60a0b6728b578258b2a10cee-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:26,367 DEBUG [StoreOpener-a51540fa60a0b6728b578258b2a10cee-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e 2024-12-02T14:12:26,373 DEBUG [StoreOpener-a51540fa60a0b6728b578258b2a10cee-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04 
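The two files just loaded for daughter region a51540fa60a0b6728b578258b2a10cee are link files whose names, as they appear in the paths above, follow a "<table>=<parent region encoded name>-<parent store file>" layout; the reference file loaded next (ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd) instead carries the parent region as a suffix and resolves to the top half of the parent's file. Below is a minimal sketch of pulling one of those link names apart, assuming that layout; the parser is a hypothetical helper, not HBase's HFileLink code.

// Minimal sketch: split "<table>=<parentRegion>-<hfile>" into its parts.
// The layout is read off the link-file paths above; encoded region names
// are hex and contain no '-', which is what the parse below relies on.
public final class LinkNameSketch {

    record ParentRef(String table, String parentRegion, String hfile) {}

    static ParentRef parse(String linkFileName) {
        int eq = linkFileName.indexOf('=');               // table names may contain '-'
        String table = linkFileName.substring(0, eq);
        String rest = linkFileName.substring(eq + 1);     // "<parentRegion>-<hfile>"
        int dash = rest.indexOf('-');
        return new ParentRef(table, rest.substring(0, dash), rest.substring(dash + 1));
    }

    public static void main(String[] args) {
        System.out.println(parse(
            "TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e"));
    }
}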
2024-12-02T14:12:26,384 DEBUG [StoreOpener-a51540fa60a0b6728b578258b2a10cee-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd->hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb-top 2024-12-02T14:12:26,385 INFO [StoreOpener-a51540fa60a0b6728b578258b2a10cee-1 {}] regionserver.HStore(327): Store=a51540fa60a0b6728b578258b2a10cee/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:26,385 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,386 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,388 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,388 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,388 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,390 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,391 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened a51540fa60a0b6728b578258b2a10cee; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692353, jitterRate=-0.1196281760931015}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:12:26,391 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:26,392 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for a51540fa60a0b6728b578258b2a10cee: Running coprocessor pre-open hook at 1733148746349Writing region info on filesystem at 1733148746349Initializing all the Stores at 1733148746350 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148746350Cleaning up temporary data from old regions at 1733148746388 (+38 ms)Running coprocessor post-open hooks at 1733148746391 (+3 ms)Region opened successfully at 1733148746392 (+1 ms) 2024-12-02T14:12:26,393 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., pid=12, masterSystemTime=1733148746345 2024-12-02T14:12:26,393 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:26,393 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:26,393 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:26,394 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:26,394 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files) 2024-12-02T14:12:26,394 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
2024-12-02T14:12:26,395 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd->hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb-top, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=116.0 K 2024-12-02T14:12:26,395 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:26,395 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:26,395 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 
2024-12-02T14:12:26,395 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 25d15c8b388f7ecb8891f3b8d1e1918c, NAME => 'TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-02T14:12:26,396 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,396 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:26,396 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,396 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,396 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=a51540fa60a0b6728b578258b2a10cee, regionState=OPEN, openSeqNum=131, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:26,396 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733148741426 2024-12-02T14:12:26,397 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733148745641 2024-12-02T14:12:26,397 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733148745664 2024-12-02T14:12:26,398 INFO [StoreOpener-25d15c8b388f7ecb8891f3b8d1e1918c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,398 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-02T14:12:26,399 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
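The FlushAllLargeStoresPolicy entry above reflects a per-family selection rule for the hbase:meta flush requested on 1588230740: flush only the column families whose memstore exceeds a lower bound, and fall back to flushing every family when none does (here none were above the size, so all four families are flushed). Below is a minimal sketch of that rule, under assumed family names, sizes, and bound; it is not the HBase policy class itself.

import java.util.List;
import java.util.Map;

// Minimal sketch of "flush the large families, or all families when none is large".
// Family names, sizes, and the lower bound are illustrative assumptions.
public final class FlushSelectionSketch {

    static List<String> familiesToFlush(Map<String, Long> memstoreSizes, long flushSizeLowerBound) {
        List<String> large = memstoreSizes.entrySet().stream()
                .filter(e -> e.getValue() > flushSizeLowerBound)
                .map(Map.Entry::getKey)
                .toList();
        return large.isEmpty() ? List.copyOf(memstoreSizes.keySet()) : large;
    }

    public static void main(String[] args) {
        Map<String, Long> sizes = Map.of("info", 2_048L, "ns", 512L, "table", 1_024L, "rep_barrier", 64L);
        // No family above the bound, so every family is selected.
        System.out.println(familiesToFlush(sizes, 16L * 1024 * 1024));
    }
}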
2024-12-02T14:12:26,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure a51540fa60a0b6728b578258b2a10cee, server=a3a61c9ba14f,43655,1733148730445 because future has completed 2024-12-02T14:12:26,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-02T14:12:26,399 INFO [StoreOpener-25d15c8b388f7ecb8891f3b8d1e1918c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 25d15c8b388f7ecb8891f3b8d1e1918c columnFamilyName info 2024-12-02T14:12:26,399 DEBUG [StoreOpener-25d15c8b388f7ecb8891f3b8d1e1918c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:26,415 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-02T14:12:26,415 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure a51540fa60a0b6728b578258b2a10cee, server=a3a61c9ba14f,43655,1733148730445 in 208 msec 2024-12-02T14:12:26,417 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a51540fa60a0b6728b578258b2a10cee, ASSIGN in 382 msec 2024-12-02T14:12:26,417 DEBUG [StoreOpener-25d15c8b388f7ecb8891f3b8d1e1918c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd->hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb-bottom 2024-12-02T14:12:26,417 INFO [StoreOpener-25d15c8b388f7ecb8891f3b8d1e1918c-1 {}] regionserver.HStore(327): Store=25d15c8b388f7ecb8891f3b8d1e1918c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:26,418 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,418 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,420 
DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/info/fe17c9ba929d451ab1a632a7a7218f97 is 193, key is TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee./info:regioninfo/1733148746396/Put/seqid=0 2024-12-02T14:12:26,420 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,420 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,424 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,425 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 25d15c8b388f7ecb8891f3b8d1e1918c; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=839716, jitterRate=0.06775425374507904}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T14:12:26,425 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:26,425 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 25d15c8b388f7ecb8891f3b8d1e1918c: Running coprocessor pre-open hook at 1733148746396Writing region info on filesystem at 1733148746396Initializing all the Stores at 1733148746398 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148746398Cleaning up temporary data from old regions at 1733148746420 (+22 ms)Running coprocessor post-open hooks at 1733148746425 (+5 ms)Region opened successfully at 1733148746425 2024-12-02T14:12:26,427 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c., pid=13, masterSystemTime=1733148746345 2024-12-02T14:12:26,427 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 25d15c8b388f7ecb8891f3b8d1e1918c:info, priority=-2147483648, current under compaction store size is 2 2024-12-02T14:12:26,427 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-02T14:12:26,427 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-02T14:12:26,428 INFO [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:26,428 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.HStore(1541): 25d15c8b388f7ecb8891f3b8d1e1918c/info is initiating minor compaction (all files) 2024-12-02T14:12:26,429 INFO [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 25d15c8b388f7ecb8891f3b8d1e1918c/info in TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:26,429 INFO [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd->hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb-bottom] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/.tmp, totalSize=85.3 K 2024-12-02T14:12:26,430 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] compactions.Compactor(225): Compacting ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1733148741426 2024-12-02T14:12:26,431 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=25d15c8b388f7ecb8891f3b8d1e1918c, regionState=OPEN, openSeqNum=131, regionLocation=a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:26,431 DEBUG [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:26,431 INFO [RS_OPEN_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:26,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 25d15c8b388f7ecb8891f3b8d1e1918c, server=a3a61c9ba14f,43655,1733148730445 because future has completed 2024-12-02T14:12:26,436 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#67 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:26,437 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/fe085f92d6644c75964c1548f7f19f2c is 1080, key is row0062/info:/1733148743605/Put/seqid=0 2024-12-02T14:12:26,444 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-02T14:12:26,444 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 25d15c8b388f7ecb8891f3b8d1e1918c, server=a3a61c9ba14f,43655,1733148730445 in 243 msec 2024-12-02T14:12:26,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-02T14:12:26,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=25d15c8b388f7ecb8891f3b8d1e1918c, ASSIGN in 411 msec 2024-12-02T14:12:26,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd, daughterA=25d15c8b388f7ecb8891f3b8d1e1918c, daughterB=a51540fa60a0b6728b578258b2a10cee in 748 msec 2024-12-02T14:12:26,463 INFO [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 25d15c8b388f7ecb8891f3b8d1e1918c#info#compaction#68 average throughput is 12.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:26,465 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/.tmp/info/6d4c027bc08149cb959a41aabae1e90c is 1080, key is row0001/info:/1733148741426/Put/seqid=0 2024-12-02T14:12:26,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741853_1029 (size=9882) 2024-12-02T14:12:26,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741853_1029 (size=9882) 2024-12-02T14:12:26,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/info/fe17c9ba929d451ab1a632a7a7218f97 2024-12-02T14:12:26,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741854_1030 (size=42984) 2024-12-02T14:12:26,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741854_1030 (size=42984) 2024-12-02T14:12:26,490 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/fe085f92d6644c75964c1548f7f19f2c as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/fe085f92d6644c75964c1548f7f19f2c 2024-12-02T14:12:26,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741855_1031 (size=70862) 2024-12-02T14:12:26,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741855_1031 (size=70862) 2024-12-02T14:12:26,500 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into fe085f92d6644c75964c1548f7f19f2c(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T14:12:26,500 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:26,500 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148746393; duration=0sec 2024-12-02T14:12:26,500 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:26,500 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info 2024-12-02T14:12:26,502 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/.tmp/info/6d4c027bc08149cb959a41aabae1e90c as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/info/6d4c027bc08149cb959a41aabae1e90c 2024-12-02T14:12:26,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/ns/8d1869331a2648c99079296fff58f651 is 43, key is default/ns:d/1733148731312/Put/seqid=0 2024-12-02T14:12:26,509 INFO [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 25d15c8b388f7ecb8891f3b8d1e1918c/info of 25d15c8b388f7ecb8891f3b8d1e1918c into 6d4c027bc08149cb959a41aabae1e90c(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T14:12:26,509 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 25d15c8b388f7ecb8891f3b8d1e1918c: 2024-12-02T14:12:26,509 INFO [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c., storeName=25d15c8b388f7ecb8891f3b8d1e1918c/info, priority=15, startTime=1733148746427; duration=0sec 2024-12-02T14:12:26,510 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:26,510 DEBUG [RS:0;a3a61c9ba14f:43655-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 25d15c8b388f7ecb8891f3b8d1e1918c:info 2024-12-02T14:12:26,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741856_1032 (size=5153) 2024-12-02T14:12:26,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741856_1032 (size=5153) 2024-12-02T14:12:26,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/ns/8d1869331a2648c99079296fff58f651 2024-12-02T14:12:26,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/table/adec1057b71c4996acb2b19ed5731a38 is 65, key is TestLogRolling-testLogRolling/table:state/1733148731762/Put/seqid=0 2024-12-02T14:12:26,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741857_1033 (size=5340) 2024-12-02T14:12:26,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741857_1033 (size=5340) 2024-12-02T14:12:26,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/table/adec1057b71c4996acb2b19ed5731a38 2024-12-02T14:12:26,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/info/fe17c9ba929d451ab1a632a7a7218f97 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/info/fe17c9ba929d451ab1a632a7a7218f97 2024-12-02T14:12:26,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/info/fe17c9ba929d451ab1a632a7a7218f97, entries=30, sequenceid=17, filesize=9.7 K 2024-12-02T14:12:26,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/ns/8d1869331a2648c99079296fff58f651 as 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/ns/8d1869331a2648c99079296fff58f651 2024-12-02T14:12:26,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/ns/8d1869331a2648c99079296fff58f651, entries=2, sequenceid=17, filesize=5.0 K 2024-12-02T14:12:26,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/table/adec1057b71c4996acb2b19ed5731a38 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/table/adec1057b71c4996acb2b19ed5731a38 2024-12-02T14:12:26,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/table/adec1057b71c4996acb2b19ed5731a38, entries=2, sequenceid=17, filesize=5.2 K 2024-12-02T14:12:26,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 182ms, sequenceid=17, compaction requested=false 2024-12-02T14:12:26,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-02T14:12:26,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:26,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:27,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:35960 deadline: 1733148757684, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. is not online on a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:27,713 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., hostname=a3a61c9ba14f,43655,1733148730445, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., hostname=a3a61c9ba14f,43655,1733148730445, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. 
is not online on a3a61c9ba14f,43655,1733148730445 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T14:12:27,714 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., hostname=a3a61c9ba14f,43655,1733148730445, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd. is not online on a3a61c9ba14f,43655,1733148730445 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T14:12:27,714 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733148731392.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd., hostname=a3a61c9ba14f,43655,1733148730445, seqNum=2 from cache 2024-12-02T14:12:27,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:27,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:28,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:28,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:28,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:29,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:29,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:29,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:30,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:30,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,946 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:30,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:30,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:31,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:31,460 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-02T14:12:31,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,463 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,494 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-02T14:12:31,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:31,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:32,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:32,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:32,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:33,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:33,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:33,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:34,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:34,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:34,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:35,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:35,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:35,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:36,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:36,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:36,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:37,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-02T14:12:37,804 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., hostname=a3a61c9ba14f,43655,1733148730445, seqNum=131]
2024-12-02T14:12:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee
2024-12-02T14:12:37,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-02T14:12:37,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/be01ebfa1b434fa38a931f3dc79d8936 is 1080, key is row0097/info:/1733148757805/Put/seqid=0
2024-12-02T14:12:37,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741858_1034 (size=12516)
2024-12-02T14:12:37,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741858_1034 (size=12516)
2024-12-02T14:12:37,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/be01ebfa1b434fa38a931f3dc79d8936
2024-12-02T14:12:37,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/be01ebfa1b434fa38a931f3dc79d8936 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/be01ebfa1b434fa38a931f3dc79d8936
2024-12-02T14:12:37,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/be01ebfa1b434fa38a931f3dc79d8936, entries=7, sequenceid=141, filesize=12.2 K
2024-12-02T14:12:37,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for a51540fa60a0b6728b578258b2a10cee in 25ms, sequenceid=141, compaction requested=false
2024-12-02T14:12:37,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee:
2024-12-02T14:12:37,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee
2024-12-02T14:12:37,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB
2024-12-02T14:12:37,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/847e067e5f2f490fa03c357c37064923 is 1080, key is row0104/info:/1733148757820/Put/seqid=0
2024-12-02T14:12:37,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741859_1035 (size=19000)
2024-12-02T14:12:37,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741859_1035 (size=19000)
2024-12-02T14:12:37,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/847e067e5f2f490fa03c357c37064923
2024-12-02T14:12:37,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/847e067e5f2f490fa03c357c37064923 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/847e067e5f2f490fa03c357c37064923
2024-12-02T14:12:37,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/847e067e5f2f490fa03c357c37064923, entries=13, sequenceid=157, filesize=18.6 K
2024-12-02T14:12:37,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for a51540fa60a0b6728b578258b2a10cee in 65ms, sequenceid=157, compaction requested=true
2024-12-02T14:12:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee:
2024-12-02T14:12:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1
2024-12-02T14:12:37,913 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-02T14:12:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T14:12:37,915 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74500 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-02T14:12:37,915 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files)
2024-12-02T14:12:37,915 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.
2024-12-02T14:12:37,915 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/fe085f92d6644c75964c1548f7f19f2c, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/be01ebfa1b434fa38a931f3dc79d8936, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/847e067e5f2f490fa03c357c37064923] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=72.8 K
2024-12-02T14:12:37,916 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe085f92d6644c75964c1548f7f19f2c, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733148743605
2024-12-02T14:12:37,916 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting be01ebfa1b434fa38a931f3dc79d8936, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733148757805
2024-12-02T14:12:37,917 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 847e067e5f2f490fa03c357c37064923, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733148757820
2024-12-02T14:12:37,945 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#73 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-02T14:12:37,946 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/5abce2c24d0f41c0a949a684629d1448 is 1080, key is row0062/info:/1733148743605/Put/seqid=0
2024-12-02T14:12:37,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741860_1036 (size=64714)
2024-12-02T14:12:37,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741860_1036 (size=64714)
2024-12-02T14:12:37,969 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/5abce2c24d0f41c0a949a684629d1448 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5abce2c24d0f41c0a949a684629d1448
2024-12-02T14:12:37,977 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into 5abce2c24d0f41c0a949a684629d1448(size=63.2 K), total size for store is 63.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-02T14:12:37,978 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee:
2024-12-02T14:12:37,978 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148757913; duration=0sec
2024-12-02T14:12:37,978 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-02T14:12:37,978 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info
2024-12-02T14:12:37,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:37,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:38,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:38,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:38,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:39,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:39,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-02T14:12:39,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/3e08ad88132d4cb5bc96c749dec80a0f is 1080, key is row0117/info:/1733148757850/Put/seqid=0 2024-12-02T14:12:39,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741861_1037 (size=19000) 2024-12-02T14:12:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741861_1037 (size=19000) 2024-12-02T14:12:39,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/3e08ad88132d4cb5bc96c749dec80a0f 2024-12-02T14:12:39,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/3e08ad88132d4cb5bc96c749dec80a0f as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/3e08ad88132d4cb5bc96c749dec80a0f 2024-12-02T14:12:39,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/3e08ad88132d4cb5bc96c749dec80a0f, entries=13, sequenceid=174, filesize=18.6 K 2024-12-02T14:12:39,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for a51540fa60a0b6728b578258b2a10cee in 22ms, sequenceid=174, compaction requested=false 2024-12-02T14:12:39,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:39,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:39,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T14:12:39,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/63a3508559e9488da49a2302866984a5 is 1080, key is row0130/info:/1733148759897/Put/seqid=0 2024-12-02T14:12:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is 
added to blk_1073741862_1038 (size=16828) 2024-12-02T14:12:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741862_1038 (size=16828) 2024-12-02T14:12:39,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/63a3508559e9488da49a2302866984a5 2024-12-02T14:12:39,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/63a3508559e9488da49a2302866984a5 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/63a3508559e9488da49a2302866984a5 2024-12-02T14:12:39,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/63a3508559e9488da49a2302866984a5, entries=11, sequenceid=188, filesize=16.4 K 2024-12-02T14:12:39,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for a51540fa60a0b6728b578258b2a10cee in 21ms, sequenceid=188, compaction requested=true 2024-12-02T14:12:39,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:39,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:39,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:39,939 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:39,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:39,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T14:12:39,941 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100542 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:39,941 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files) 2024-12-02T14:12:39,941 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
2024-12-02T14:12:39,941 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5abce2c24d0f41c0a949a684629d1448, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/3e08ad88132d4cb5bc96c749dec80a0f, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/63a3508559e9488da49a2302866984a5] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=98.2 K 2024-12-02T14:12:39,941 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5abce2c24d0f41c0a949a684629d1448, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733148743605 2024-12-02T14:12:39,942 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3e08ad88132d4cb5bc96c749dec80a0f, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1733148757850 2024-12-02T14:12:39,942 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63a3508559e9488da49a2302866984a5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733148759897 2024-12-02T14:12:39,944 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/cc746db530dd4b7583940014253058ab is 1080, key is row0141/info:/1733148759919/Put/seqid=0 2024-12-02T14:12:39,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741863_1039 (size=17906) 2024-12-02T14:12:39,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741863_1039 (size=17906) 2024-12-02T14:12:39,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/cc746db530dd4b7583940014253058ab 2024-12-02T14:12:39,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/cc746db530dd4b7583940014253058ab as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cc746db530dd4b7583940014253058ab 2024-12-02T14:12:39,958 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#77 average throughput is 40.53 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:39,958 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/94622ae5b2f3426a84c63c0e56a2eb65 is 1080, key is row0062/info:/1733148743605/Put/seqid=0 2024-12-02T14:12:39,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741864_1040 (size=90765) 2024-12-02T14:12:39,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741864_1040 (size=90765) 2024-12-02T14:12:39,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cc746db530dd4b7583940014253058ab, entries=12, sequenceid=203, filesize=17.5 K 2024-12-02T14:12:39,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for a51540fa60a0b6728b578258b2a10cee in 23ms, sequenceid=203, compaction requested=false 2024-12-02T14:12:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:39,967 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/94622ae5b2f3426a84c63c0e56a2eb65 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/94622ae5b2f3426a84c63c0e56a2eb65 2024-12-02T14:12:39,973 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into 94622ae5b2f3426a84c63c0e56a2eb65(size=88.6 K), total size for store is 106.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T14:12:39,973 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:39,973 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148759939; duration=0sec 2024-12-02T14:12:39,973 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:39,973 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info 2024-12-02T14:12:39,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:39,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:40,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:40,388 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T14:12:40,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:40,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:41,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:41,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:41,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-02T14:12:41,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/996e7de9b3bb4c8e88b5c7aa5633a9d7 is 1080, key is row0153/info:/1733148759941/Put/seqid=0 2024-12-02T14:12:41,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741865_1041 (size=14672) 2024-12-02T14:12:41,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741865_1041 (size=14672) 2024-12-02T14:12:41,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/996e7de9b3bb4c8e88b5c7aa5633a9d7 2024-12-02T14:12:41,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/996e7de9b3bb4c8e88b5c7aa5633a9d7 as 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/996e7de9b3bb4c8e88b5c7aa5633a9d7 2024-12-02T14:12:41,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/996e7de9b3bb4c8e88b5c7aa5633a9d7, entries=9, sequenceid=216, filesize=14.3 K 2024-12-02T14:12:41,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=11.56 KB/11836 for a51540fa60a0b6728b578258b2a10cee in 25ms, sequenceid=216, compaction requested=true 2024-12-02T14:12:41,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:41,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:41,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:41,986 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:41,987 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:41,987 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files) 2024-12-02T14:12:41,987 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
2024-12-02T14:12:41,987 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/94622ae5b2f3426a84c63c0e56a2eb65, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cc746db530dd4b7583940014253058ab, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/996e7de9b3bb4c8e88b5c7aa5633a9d7] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=120.5 K 2024-12-02T14:12:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:41,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T14:12:41,987 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 94622ae5b2f3426a84c63c0e56a2eb65, keycount=79, bloomtype=ROW, size=88.6 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733148743605 2024-12-02T14:12:41,988 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting cc746db530dd4b7583940014253058ab, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733148759919 2024-12-02T14:12:41,988 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 996e7de9b3bb4c8e88b5c7aa5633a9d7, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733148759941 2024-12-02T14:12:41,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:41,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:41,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/4bf229f6f9f344b5bcfa2b810dd1b162 is 1080, key is row0162/info:/1733148761963/Put/seqid=0 2024-12-02T14:12:41,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741866_1042 (size=17906) 2024-12-02T14:12:41,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741866_1042 (size=17906) 2024-12-02T14:12:41,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/4bf229f6f9f344b5bcfa2b810dd1b162 2024-12-02T14:12:42,000 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#80 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:42,000 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/2e3fa457d02843ceaf838749b9075152 is 1080, key is row0062/info:/1733148743605/Put/seqid=0 2024-12-02T14:12:42,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/4bf229f6f9f344b5bcfa2b810dd1b162 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/4bf229f6f9f344b5bcfa2b810dd1b162 2024-12-02T14:12:42,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741867_1043 (size=113509) 2024-12-02T14:12:42,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741867_1043 (size=113509) 2024-12-02T14:12:42,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/4bf229f6f9f344b5bcfa2b810dd1b162, entries=12, sequenceid=231, filesize=17.5 K 2024-12-02T14:12:42,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=9.46 KB/9684 for a51540fa60a0b6728b578258b2a10cee in 20ms, sequenceid=231, compaction requested=false 2024-12-02T14:12:42,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:42,009 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/2e3fa457d02843ceaf838749b9075152 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/2e3fa457d02843ceaf838749b9075152 2024-12-02T14:12:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:42,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T14:12:42,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/7236591e624d4e0eb10bac5b5259685a is 1080, key is row0174/info:/1733148761988/Put/seqid=0 2024-12-02T14:12:42,015 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into 2e3fa457d02843ceaf838749b9075152(size=110.8 K), total size for store is 128.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:12:42,015 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:42,015 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148761986; duration=0sec 2024-12-02T14:12:42,015 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:42,015 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info 2024-12-02T14:12:42,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741868_1044 (size=16828) 2024-12-02T14:12:42,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741868_1044 (size=16828) 2024-12-02T14:12:42,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/7236591e624d4e0eb10bac5b5259685a 2024-12-02T14:12:42,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/7236591e624d4e0eb10bac5b5259685a as 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/7236591e624d4e0eb10bac5b5259685a 2024-12-02T14:12:42,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/7236591e624d4e0eb10bac5b5259685a, entries=11, sequenceid=245, filesize=16.4 K 2024-12-02T14:12:42,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for a51540fa60a0b6728b578258b2a10cee in 20ms, sequenceid=245, compaction requested=true 2024-12-02T14:12:42,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:42,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:42,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:42,030 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:42,031 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 148243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:42,031 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files) 2024-12-02T14:12:42,031 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
2024-12-02T14:12:42,031 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/2e3fa457d02843ceaf838749b9075152, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/4bf229f6f9f344b5bcfa2b810dd1b162, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/7236591e624d4e0eb10bac5b5259685a] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=144.8 K 2024-12-02T14:12:42,031 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2e3fa457d02843ceaf838749b9075152, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733148743605 2024-12-02T14:12:42,032 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4bf229f6f9f344b5bcfa2b810dd1b162, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733148761963 2024-12-02T14:12:42,032 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7236591e624d4e0eb10bac5b5259685a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733148761988 2024-12-02T14:12:42,040 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#82 average throughput is 126.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:42,041 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/5cc61df4fa804e52bbc2d8f33ab33370 is 1080, key is row0062/info:/1733148743605/Put/seqid=0 2024-12-02T14:12:42,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741869_1045 (size=138610) 2024-12-02T14:12:42,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741869_1045 (size=138610) 2024-12-02T14:12:42,049 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/5cc61df4fa804e52bbc2d8f33ab33370 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5cc61df4fa804e52bbc2d8f33ab33370 2024-12-02T14:12:42,055 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into 5cc61df4fa804e52bbc2d8f33ab33370(size=135.4 K), total size for store is 135.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:12:42,055 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:42,055 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148762030; duration=0sec 2024-12-02T14:12:42,055 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:42,055 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info 2024-12-02T14:12:42,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:42,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:42,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:43,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:43,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:43,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:44,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:44,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-12-02T14:12:44,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/dbb5c336b02e490092cd6688978b0ef6 is 1080, key is row0185/info:/1733148762011/Put/seqid=0 2024-12-02T14:12:44,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741870_1046 (size=14673) 2024-12-02T14:12:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741870_1046 (size=14673) 2024-12-02T14:12:44,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/dbb5c336b02e490092cd6688978b0ef6 2024-12-02T14:12:44,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/dbb5c336b02e490092cd6688978b0ef6 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/dbb5c336b02e490092cd6688978b0ef6 2024-12-02T14:12:44,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/dbb5c336b02e490092cd6688978b0ef6, entries=9, sequenceid=259, filesize=14.3 K 2024-12-02T14:12:44,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=11.56 KB/11836 for a51540fa60a0b6728b578258b2a10cee in 28ms, sequenceid=259, compaction requested=false 2024-12-02T14:12:44,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:44,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:44,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T14:12:44,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/cb20b6ee1c234c4cb4d4675a3f5e4253 is 1080, key is row0194/info:/1733148764033/Put/seqid=0 2024-12-02T14:12:44,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added 
to blk_1073741871_1047 (size=17918) 2024-12-02T14:12:44,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741871_1047 (size=17918) 2024-12-02T14:12:44,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/cb20b6ee1c234c4cb4d4675a3f5e4253 2024-12-02T14:12:44,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/cb20b6ee1c234c4cb4d4675a3f5e4253 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cb20b6ee1c234c4cb4d4675a3f5e4253 2024-12-02T14:12:44,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cb20b6ee1c234c4cb4d4675a3f5e4253, entries=12, sequenceid=274, filesize=17.5 K 2024-12-02T14:12:44,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for a51540fa60a0b6728b578258b2a10cee in 22ms, sequenceid=274, compaction requested=true 2024-12-02T14:12:44,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:44,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:44,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:44,081 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:44,083 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 171201 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:44,083 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files) 2024-12-02T14:12:44,083 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
2024-12-02T14:12:44,083 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5cc61df4fa804e52bbc2d8f33ab33370, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/dbb5c336b02e490092cd6688978b0ef6, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cb20b6ee1c234c4cb4d4675a3f5e4253] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=167.2 K 2024-12-02T14:12:44,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:44,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-02T14:12:44,083 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5cc61df4fa804e52bbc2d8f33ab33370, keycount=123, bloomtype=ROW, size=135.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733148743605 2024-12-02T14:12:44,084 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting dbb5c336b02e490092cd6688978b0ef6, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733148762011 2024-12-02T14:12:44,084 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting cb20b6ee1c234c4cb4d4675a3f5e4253, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733148764033 2024-12-02T14:12:44,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/83adf050213e4e7c87a9eab36afdcd01 is 1080, key is row0206/info:/1733148764060/Put/seqid=0 2024-12-02T14:12:44,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741872_1048 (size=17918) 2024-12-02T14:12:44,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741872_1048 (size=17918) 2024-12-02T14:12:44,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/83adf050213e4e7c87a9eab36afdcd01 2024-12-02T14:12:44,098 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#86 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:44,099 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/9f8353b5e59e4552bb0977e9575e09d9 is 1080, key is row0062/info:/1733148743605/Put/seqid=0 2024-12-02T14:12:44,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/83adf050213e4e7c87a9eab36afdcd01 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/83adf050213e4e7c87a9eab36afdcd01 2024-12-02T14:12:44,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741873_1049 (size=161367) 2024-12-02T14:12:44,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741873_1049 (size=161367) 2024-12-02T14:12:44,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/83adf050213e4e7c87a9eab36afdcd01, entries=12, sequenceid=289, filesize=17.5 K 2024-12-02T14:12:44,107 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for a51540fa60a0b6728b578258b2a10cee in 24ms, sequenceid=289, compaction requested=false 2024-12-02T14:12:44,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:44,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:44,514 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/9f8353b5e59e4552bb0977e9575e09d9 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/9f8353b5e59e4552bb0977e9575e09d9 2024-12-02T14:12:44,521 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into 9f8353b5e59e4552bb0977e9575e09d9(size=157.6 K), total size for store is 175.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:12:44,521 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:44,521 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148764081; duration=0sec 2024-12-02T14:12:44,521 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:44,521 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info 2024-12-02T14:12:44,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:44,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:45,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:45,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:45,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:46,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:46,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-02T14:12:46,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/951ed426a1c946798bedf176da943fd3 is 1080, key is row0218/info:/1733148764085/Put/seqid=0 2024-12-02T14:12:46,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741874_1050 (size=13602) 2024-12-02T14:12:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741874_1050 (size=13602) 2024-12-02T14:12:46,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/951ed426a1c946798bedf176da943fd3 2024-12-02T14:12:46,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/951ed426a1c946798bedf176da943fd3 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/951ed426a1c946798bedf176da943fd3 2024-12-02T14:12:46,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/951ed426a1c946798bedf176da943fd3, entries=8, sequenceid=301, filesize=13.3 K 2024-12-02T14:12:46,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for a51540fa60a0b6728b578258b2a10cee in 26ms, sequenceid=301, compaction requested=true 2024-12-02T14:12:46,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:46,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:46,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:46,129 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:46,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-02T14:12:46,130 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192887 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:46,130 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files) 2024-12-02T14:12:46,130 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:46,130 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/9f8353b5e59e4552bb0977e9575e09d9, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/83adf050213e4e7c87a9eab36afdcd01, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/951ed426a1c946798bedf176da943fd3] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=188.4 K 2024-12-02T14:12:46,131 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f8353b5e59e4552bb0977e9575e09d9, keycount=144, bloomtype=ROW, size=157.6 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733148743605 2024-12-02T14:12:46,131 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83adf050213e4e7c87a9eab36afdcd01, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1733148764060 2024-12-02T14:12:46,132 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 951ed426a1c946798bedf176da943fd3, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733148764085 2024-12-02T14:12:46,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/f3c7b69d661042348f08fd4203a823ee is 1080, key is row0226/info:/1733148766105/Put/seqid=0 2024-12-02T14:12:46,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to 
blk_1073741875_1051 (size=16839) 2024-12-02T14:12:46,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741875_1051 (size=16839) 2024-12-02T14:12:46,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/f3c7b69d661042348f08fd4203a823ee 2024-12-02T14:12:46,144 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#89 average throughput is 84.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:46,144 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/f4561656bb2b4661a6eb5fa6b4a0e6e7 is 1080, key is row0062/info:/1733148743605/Put/seqid=0 2024-12-02T14:12:46,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/f3c7b69d661042348f08fd4203a823ee as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f3c7b69d661042348f08fd4203a823ee 2024-12-02T14:12:46,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f3c7b69d661042348f08fd4203a823ee, entries=11, sequenceid=315, filesize=16.4 K 2024-12-02T14:12:46,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for a51540fa60a0b6728b578258b2a10cee in 29ms, sequenceid=315, compaction requested=false 2024-12-02T14:12:46,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:46,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43655 {}] regionserver.HRegion(8855): Flush requested on a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:46,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-02T14:12:46,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741876_1052 (size=183053) 2024-12-02T14:12:46,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741876_1052 (size=183053) 2024-12-02T14:12:46,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/36af9f12afa347cfa180ba71a40bb183 is 1080, key is row0237/info:/1733148766130/Put/seqid=0 2024-12-02T14:12:46,170 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/f4561656bb2b4661a6eb5fa6b4a0e6e7 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f4561656bb2b4661a6eb5fa6b4a0e6e7 2024-12-02T14:12:46,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741877_1053 (size=15760) 2024-12-02T14:12:46,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741877_1053 (size=15760) 2024-12-02T14:12:46,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/36af9f12afa347cfa180ba71a40bb183 2024-12-02T14:12:46,177 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into f4561656bb2b4661a6eb5fa6b4a0e6e7(size=178.8 K), total size for store is 195.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-02T14:12:46,177 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:46,177 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148766129; duration=0sec 2024-12-02T14:12:46,177 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:46,177 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info 2024-12-02T14:12:46,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/36af9f12afa347cfa180ba71a40bb183 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/36af9f12afa347cfa180ba71a40bb183 2024-12-02T14:12:46,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/36af9f12afa347cfa180ba71a40bb183, entries=10, sequenceid=328, filesize=15.4 K 2024-12-02T14:12:46,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for a51540fa60a0b6728b578258b2a10cee in 25ms, sequenceid=328, compaction requested=true 2024-12-02T14:12:46,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:46,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a51540fa60a0b6728b578258b2a10cee:info, priority=-2147483648, current under compaction store size is 1 2024-12-02T14:12:46,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:46,184 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-02T14:12:46,185 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 215652 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-02T14:12:46,185 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1541): a51540fa60a0b6728b578258b2a10cee/info is initiating minor compaction (all files) 2024-12-02T14:12:46,185 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a51540fa60a0b6728b578258b2a10cee/info in TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
2024-12-02T14:12:46,185 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f4561656bb2b4661a6eb5fa6b4a0e6e7, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f3c7b69d661042348f08fd4203a823ee, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/36af9f12afa347cfa180ba71a40bb183] into tmpdir=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp, totalSize=210.6 K 2024-12-02T14:12:46,185 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting f4561656bb2b4661a6eb5fa6b4a0e6e7, keycount=164, bloomtype=ROW, size=178.8 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733148743605 2024-12-02T14:12:46,186 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting f3c7b69d661042348f08fd4203a823ee, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733148766105 2024-12-02T14:12:46,186 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] compactions.Compactor(225): Compacting 36af9f12afa347cfa180ba71a40bb183, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733148766130 2024-12-02T14:12:46,197 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a51540fa60a0b6728b578258b2a10cee#info#compaction#91 average throughput is 63.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-02T14:12:46,197 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/872b2d84d02b46bba3b2bf780acba13f is 1080, key is row0062/info:/1733148743605/Put/seqid=0 2024-12-02T14:12:46,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741878_1054 (size=205891) 2024-12-02T14:12:46,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741878_1054 (size=205891) 2024-12-02T14:12:46,206 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/872b2d84d02b46bba3b2bf780acba13f as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/872b2d84d02b46bba3b2bf780acba13f 2024-12-02T14:12:46,212 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a51540fa60a0b6728b578258b2a10cee/info of a51540fa60a0b6728b578258b2a10cee into 872b2d84d02b46bba3b2bf780acba13f(size=201.1 K), total size for store is 201.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-02T14:12:46,212 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:46,212 INFO [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee., storeName=a51540fa60a0b6728b578258b2a10cee/info, priority=13, startTime=1733148766184; duration=0sec 2024-12-02T14:12:46,212 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-02T14:12:46,212 DEBUG [RS:0;a3a61c9ba14f:43655-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a51540fa60a0b6728b578258b2a10cee:info 2024-12-02T14:12:46,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:46,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:46,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:47,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:47,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:47,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:48,182 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-02T14:12:48,182 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C43655%2C1733148730445.1733148768182 2024-12-02T14:12:48,189 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,189 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,189 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,189 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,190 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,190 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148730840 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148768182 2024-12-02T14:12:48,191 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42483:42483),(127.0.0.1/127.0.0.1:42231:42231)] 2024-12-02T14:12:48,191 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148730840 is not closed yet, will try archiving it next time 2024-12-02T14:12:48,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741833_1009 (size=317837) 2024-12-02T14:12:48,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741833_1009 (size=317837) 2024-12-02T14:12:48,196 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-02T14:12:48,201 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/info/e627bacb3025415dba2d7b6ab933a0a2 is 186, key is TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c./info:regioninfo/1733148746431/Put/seqid=0 2024-12-02T14:12:48,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741880_1056 (size=6153) 2024-12-02T14:12:48,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741880_1056 (size=6153) 2024-12-02T14:12:48,205 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/info/e627bacb3025415dba2d7b6ab933a0a2 2024-12-02T14:12:48,210 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/.tmp/info/e627bacb3025415dba2d7b6ab933a0a2 as 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/info/e627bacb3025415dba2d7b6ab933a0a2 2024-12-02T14:12:48,215 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/info/e627bacb3025415dba2d7b6ab933a0a2, entries=5, sequenceid=21, filesize=6.0 K 2024-12-02T14:12:48,216 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-12-02T14:12:48,216 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-02T14:12:48,216 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 25d15c8b388f7ecb8891f3b8d1e1918c: 2024-12-02T14:12:48,216 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing a51540fa60a0b6728b578258b2a10cee 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-02T14:12:48,221 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/bcffdb2ca19f47e2bc90309279d52143 is 1080, key is row0247/info:/1733148766161/Put/seqid=0 2024-12-02T14:12:48,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741881_1057 (size=15760) 2024-12-02T14:12:48,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741881_1057 (size=15760) 2024-12-02T14:12:48,227 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/bcffdb2ca19f47e2bc90309279d52143 2024-12-02T14:12:48,231 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/.tmp/info/bcffdb2ca19f47e2bc90309279d52143 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/bcffdb2ca19f47e2bc90309279d52143 2024-12-02T14:12:48,235 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/bcffdb2ca19f47e2bc90309279d52143, entries=10, sequenceid=343, filesize=15.4 K 2024-12-02T14:12:48,236 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for a51540fa60a0b6728b578258b2a10cee in 20ms, sequenceid=343, compaction requested=false 2024-12-02T14:12:48,236 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a51540fa60a0b6728b578258b2a10cee: 2024-12-02T14:12:48,237 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
a3a61c9ba14f%2C43655%2C1733148730445.1733148768237 2024-12-02T14:12:48,241 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,241 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,241 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,241 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,241 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,241 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148768182 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148768237 2024-12-02T14:12:48,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741879_1055 (size=731) 2024-12-02T14:12:48,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741879_1055 (size=731) 2024-12-02T14:12:48,248 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148730840 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/oldWALs/a3a61c9ba14f%2C43655%2C1733148730445.1733148730840 2024-12-02T14:12:48,248 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42231:42231),(127.0.0.1/127.0.0.1:42483:42483)] 2024-12-02T14:12:48,248 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T14:12:48,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T14:12:48,249 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
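The flush-then-roll sequence logged above (MemStore flushes for meta and a51540fa60a0b6728b578258b2a10cee, followed by "Rolled WAL ... new WAL ...") can also be driven from a client through the public Admin API. The sketch below is a hedged approximation, not the test's own code: the table name is taken from the log, the connection setup is generic, and only Admin methods known to exist (flush, getRegionServers, rollWALWriter) are used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
      admin.flush(table); // triggers MemStore flushes like the ones logged above
      // Roll the WAL on each region server; this produces "Rolled WAL ..." entries.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    }
  }
}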
2024-12-02T14:12:48,249 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/WALs/a3a61c9ba14f,43655,1733148730445/a3a61c9ba14f%2C43655%2C1733148730445.1733148768182 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/oldWALs/a3a61c9ba14f%2C43655%2C1733148730445.1733148768182 2024-12-02T14:12:48,249 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:12:48,249 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:48,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:48,249 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T14:12:48,249 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T14:12:48,249 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=756872570, stopped=false 2024-12-02T14:12:48,249 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a3a61c9ba14f,46157,1733148730404 2024-12-02T14:12:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:48,250 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:12:48,250 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
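The shutdown entries above originate from the test's tearDown, as the logged call stacks show (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> JVMClusterUtil/HMaster.shutdown). A minimal JUnit-style sketch of that lifecycle follows; shutdownMiniCluster appears in the call stacks, while the field name and the startMiniCluster counterpart are assumptions.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingLifecycleSketch {
  // Assumed field name; the utility class itself is the one named in the call stacks.
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster(); // DFS + ZooKeeper + master + region server
  }

  @After
  public void tearDown() throws Exception {
    // Closes the cluster connection, stops master and region servers, then HDFS;
    // this is the path that produces the "Shutting down minicluster" entries above.
    TEST_UTIL.shutdownMiniCluster();
  }
}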
2024-12-02T14:12:48,250 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:12:48,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:48,251 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,43655,1733148730445' ***** 2024-12-02T14:12:48,251 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T14:12:48,251 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T14:12:48,251 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T14:12:48,251 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(3091): Received CLOSE for 25d15c8b388f7ecb8891f3b8d1e1918c 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(3091): Received CLOSE for a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:12:48,251 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 25d15c8b388f7ecb8891f3b8d1e1918c, disabling compactions & flushes 2024-12-02T14:12:48,251 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:48,251 INFO [RS:0;a3a61c9ba14f:43655 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a3a61c9ba14f:43655. 2024-12-02T14:12:48,251 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:48,251 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. after waiting 0 ms 2024-12-02T14:12:48,251 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 
2024-12-02T14:12:48,252 DEBUG [RS:0;a3a61c9ba14f:43655 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:12:48,252 DEBUG [RS:0;a3a61c9ba14f:43655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:48,252 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T14:12:48,252 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T14:12:48,252 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T14:12:48,252 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:12:48,252 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-02T14:12:48,252 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 25d15c8b388f7ecb8891f3b8d1e1918c=TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c., a51540fa60a0b6728b578258b2a10cee=TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.} 2024-12-02T14:12:48,252 DEBUG [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 25d15c8b388f7ecb8891f3b8d1e1918c, a51540fa60a0b6728b578258b2a10cee 2024-12-02T14:12:48,252 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:12:48,252 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:12:48,252 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:12:48,252 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:12:48,252 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:12:48,252 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd->hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb-bottom] to archive 2024-12-02T14:12:48,253 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-02T14:12:48,254 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:48,254 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=a3a61c9ba14f:46157 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-02T14:12:48,255 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-02T14:12:48,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:48,260 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/25d15c8b388f7ecb8891f3b8d1e1918c/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-12-02T14:12:48,260 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-02T14:12:48,261 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 25d15c8b388f7ecb8891f3b8d1e1918c: Waiting for close lock at 1733148768251Running coprocessor pre-close hooks at 1733148768251Disabling compacts and flushes for region at 1733148768251Disabling writes for close at 1733148768251Writing region close event to WAL at 1733148768257 (+6 ms)Running coprocessor post-close hooks at 1733148768261 (+4 ms)Closed at 1733148768261 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:12:48,261 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733148745701.25d15c8b388f7ecb8891f3b8d1e1918c. 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148768252Running coprocessor pre-close hooks at 1733148768252Disabling compacts and flushes for region at 1733148768252Disabling writes for close at 1733148768252Writing region close event to WAL at 1733148768257 (+5 ms)Running coprocessor post-close hooks at 1733148768261 (+4 ms)Closed at 1733148768261 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a51540fa60a0b6728b578258b2a10cee, disabling compactions & flushes 2024-12-02T14:12:48,261 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 
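The repeated RecoverLeaseFSUtils "Failed invocation" warnings above all share the same root cause: the DFS client behind those old WAL files has already been closed, so the reflective isFileClosed probe fails with java.io.IOException: Filesystem closed, which surfaces as an InvocationTargetException. A small sketch of that failure mode follows; the namenode port is copied from the log, the WAL path is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:33497"); // port taken from the log
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path wal = new Path("/user/jenkins/test-data/placeholder-wal"); // placeholder path
    dfs.close();
    // After close(), both calls below throw "java.io.IOException: Filesystem closed";
    // RecoverLeaseFSUtils invokes isFileClosed reflectively, hence the
    // InvocationTargetException wrapper seen in the stack traces.
    dfs.isFileClosed(wal);
    dfs.recoverLease(wal);
  }
}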
2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. after waiting 0 ms 2024-12-02T14:12:48,261 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:48,262 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd->hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/52f2fc79cbbe65c7cb5b4ed5f3e8c1cd/info/ebe1e4cd545548f09647aab4b5c6e1fb-top, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/fe085f92d6644c75964c1548f7f19f2c, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/be01ebfa1b434fa38a931f3dc79d8936, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5abce2c24d0f41c0a949a684629d1448, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/847e067e5f2f490fa03c357c37064923, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/3e08ad88132d4cb5bc96c749dec80a0f, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/94622ae5b2f3426a84c63c0e56a2eb65, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/63a3508559e9488da49a2302866984a5, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cc746db530dd4b7583940014253058ab, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/2e3fa457d02843ceaf838749b9075152, 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/996e7de9b3bb4c8e88b5c7aa5633a9d7, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/4bf229f6f9f344b5bcfa2b810dd1b162, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5cc61df4fa804e52bbc2d8f33ab33370, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/7236591e624d4e0eb10bac5b5259685a, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/dbb5c336b02e490092cd6688978b0ef6, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/9f8353b5e59e4552bb0977e9575e09d9, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cb20b6ee1c234c4cb4d4675a3f5e4253, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/83adf050213e4e7c87a9eab36afdcd01, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f4561656bb2b4661a6eb5fa6b4a0e6e7, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/951ed426a1c946798bedf176da943fd3, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f3c7b69d661042348f08fd4203a823ee, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/36af9f12afa347cfa180ba71a40bb183] to archive 2024-12-02T14:12:48,263 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
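The HFileArchiver entries that follow move each compacted-away store file from the region's data directory to the matching location under archive/, preserving the namespace/table/region/family layout. The sketch below only illustrates that path mapping, with the root dir and one file name taken from the log; the real HFileArchiver additionally handles name collisions, retries, and bulk moves.

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Map data/<ns>/<table>/<region>/<cf>/<hfile> to archive/data/... under the same root.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f");
    Path hfile = new Path(root,
        "data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/fe085f92d6644c75964c1548f7f19f2c");
    // Prints the archive-side path, matching the destinations in the entries below.
    System.out.println(toArchivePath(root, hfile));
  }
}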
2024-12-02T14:12:48,264 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/ebe1e4cd545548f09647aab4b5c6e1fb.52f2fc79cbbe65c7cb5b4ed5f3e8c1cd 2024-12-02T14:12:48,265 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-f863b64279be44d9a83d1671e3b3bc04 2024-12-02T14:12:48,267 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/fe085f92d6644c75964c1548f7f19f2c to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/fe085f92d6644c75964c1548f7f19f2c 2024-12-02T14:12:48,268 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/TestLogRolling-testLogRolling=52f2fc79cbbe65c7cb5b4ed5f3e8c1cd-24948f0340a144bfb04e224a38ab523e 2024-12-02T14:12:48,269 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/be01ebfa1b434fa38a931f3dc79d8936 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/be01ebfa1b434fa38a931f3dc79d8936 2024-12-02T14:12:48,270 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5abce2c24d0f41c0a949a684629d1448 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5abce2c24d0f41c0a949a684629d1448 2024-12-02T14:12:48,271 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/847e067e5f2f490fa03c357c37064923 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/847e067e5f2f490fa03c357c37064923 2024-12-02T14:12:48,272 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/3e08ad88132d4cb5bc96c749dec80a0f to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/3e08ad88132d4cb5bc96c749dec80a0f 2024-12-02T14:12:48,273 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/94622ae5b2f3426a84c63c0e56a2eb65 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/94622ae5b2f3426a84c63c0e56a2eb65 2024-12-02T14:12:48,274 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/63a3508559e9488da49a2302866984a5 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/63a3508559e9488da49a2302866984a5 2024-12-02T14:12:48,275 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cc746db530dd4b7583940014253058ab to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cc746db530dd4b7583940014253058ab 2024-12-02T14:12:48,276 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/2e3fa457d02843ceaf838749b9075152 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/2e3fa457d02843ceaf838749b9075152 2024-12-02T14:12:48,277 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/996e7de9b3bb4c8e88b5c7aa5633a9d7 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/996e7de9b3bb4c8e88b5c7aa5633a9d7 2024-12-02T14:12:48,277 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/4bf229f6f9f344b5bcfa2b810dd1b162 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/4bf229f6f9f344b5bcfa2b810dd1b162 2024-12-02T14:12:48,278 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5cc61df4fa804e52bbc2d8f33ab33370 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/5cc61df4fa804e52bbc2d8f33ab33370 2024-12-02T14:12:48,279 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/7236591e624d4e0eb10bac5b5259685a to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/7236591e624d4e0eb10bac5b5259685a 2024-12-02T14:12:48,280 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/dbb5c336b02e490092cd6688978b0ef6 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/dbb5c336b02e490092cd6688978b0ef6 2024-12-02T14:12:48,282 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/9f8353b5e59e4552bb0977e9575e09d9 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/9f8353b5e59e4552bb0977e9575e09d9 2024-12-02T14:12:48,283 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cb20b6ee1c234c4cb4d4675a3f5e4253 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/cb20b6ee1c234c4cb4d4675a3f5e4253 2024-12-02T14:12:48,283 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/83adf050213e4e7c87a9eab36afdcd01 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/83adf050213e4e7c87a9eab36afdcd01 2024-12-02T14:12:48,284 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f4561656bb2b4661a6eb5fa6b4a0e6e7 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f4561656bb2b4661a6eb5fa6b4a0e6e7 2024-12-02T14:12:48,285 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/951ed426a1c946798bedf176da943fd3 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/951ed426a1c946798bedf176da943fd3 2024-12-02T14:12:48,286 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f3c7b69d661042348f08fd4203a823ee to 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/f3c7b69d661042348f08fd4203a823ee 2024-12-02T14:12:48,287 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/36af9f12afa347cfa180ba71a40bb183 to hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/archive/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/info/36af9f12afa347cfa180ba71a40bb183 2024-12-02T14:12:48,287 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fe085f92d6644c75964c1548f7f19f2c=42984, be01ebfa1b434fa38a931f3dc79d8936=12516, 5abce2c24d0f41c0a949a684629d1448=64714, 847e067e5f2f490fa03c357c37064923=19000, 3e08ad88132d4cb5bc96c749dec80a0f=19000, 94622ae5b2f3426a84c63c0e56a2eb65=90765, 63a3508559e9488da49a2302866984a5=16828, cc746db530dd4b7583940014253058ab=17906, 2e3fa457d02843ceaf838749b9075152=113509, 996e7de9b3bb4c8e88b5c7aa5633a9d7=14672, 4bf229f6f9f344b5bcfa2b810dd1b162=17906, 5cc61df4fa804e52bbc2d8f33ab33370=138610, 7236591e624d4e0eb10bac5b5259685a=16828, dbb5c336b02e490092cd6688978b0ef6=14673, 9f8353b5e59e4552bb0977e9575e09d9=161367, cb20b6ee1c234c4cb4d4675a3f5e4253=17918, 83adf050213e4e7c87a9eab36afdcd01=17918, f4561656bb2b4661a6eb5fa6b4a0e6e7=183053, 951ed426a1c946798bedf176da943fd3=13602, f3c7b69d661042348f08fd4203a823ee=16839, 36af9f12afa347cfa180ba71a40bb183=15760] 2024-12-02T14:12:48,290 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/data/default/TestLogRolling-testLogRolling/a51540fa60a0b6728b578258b2a10cee/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130 2024-12-02T14:12:48,291 INFO [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:48,291 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a51540fa60a0b6728b578258b2a10cee: Waiting for close lock at 1733148768261Running coprocessor pre-close hooks at 1733148768261Disabling compacts and flushes for region at 1733148768261Disabling writes for close at 1733148768261Writing region close event to WAL at 1733148768287 (+26 ms)Running coprocessor post-close hooks at 1733148768291 (+4 ms)Closed at 1733148768291 2024-12-02T14:12:48,291 DEBUG [RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733148745701.a51540fa60a0b6728b578258b2a10cee. 2024-12-02T14:12:48,452 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,43655,1733148730445; all regions closed. 
2024-12-02T14:12:48,453 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,453 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,453 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,453 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,454 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741834_1010 (size=8107) 2024-12-02T14:12:48,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741834_1010 (size=8107) 2024-12-02T14:12:48,461 DEBUG [RS:0;a3a61c9ba14f:43655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/oldWALs 2024-12-02T14:12:48,462 INFO [RS:0;a3a61c9ba14f:43655 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C43655%2C1733148730445.meta:.meta(num 1733148731257) 2024-12-02T14:12:48,462 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,462 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,463 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,463 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,463 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741882_1058 (size=780) 2024-12-02T14:12:48,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741882_1058 (size=780) 2024-12-02T14:12:48,467 DEBUG [RS:0;a3a61c9ba14f:43655 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/oldWALs 2024-12-02T14:12:48,467 INFO [RS:0;a3a61c9ba14f:43655 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C43655%2C1733148730445:(num 1733148768237) 2024-12-02T14:12:48,467 DEBUG [RS:0;a3a61c9ba14f:43655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:48,467 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:12:48,467 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:12:48,467 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:12:48,467 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:12:48,467 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T14:12:48,467 INFO [RS:0;a3a61c9ba14f:43655 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43655 2024-12-02T14:12:48,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:12:48,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,43655,1733148730445 2024-12-02T14:12:48,468 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:12:48,469 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,43655,1733148730445] 2024-12-02T14:12:48,470 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,43655,1733148730445 already deleted, retry=false 2024-12-02T14:12:48,470 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,43655,1733148730445 expired; onlineServers=0 2024-12-02T14:12:48,470 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a3a61c9ba14f,46157,1733148730404' ***** 2024-12-02T14:12:48,470 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T14:12:48,470 INFO [M:0;a3a61c9ba14f:46157 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T14:12:48,470 INFO [M:0;a3a61c9ba14f:46157 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:12:48,470 DEBUG [M:0;a3a61c9ba14f:46157 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T14:12:48,470 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T14:12:48,470 DEBUG [M:0;a3a61c9ba14f:46157 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:12:48,470 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148730629 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148730629,5,FailOnTimeoutGroup] 2024-12-02T14:12:48,470 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148730630 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148730630,5,FailOnTimeoutGroup] 2024-12-02T14:12:48,470 INFO [M:0;a3a61c9ba14f:46157 {}] hbase.ChoreService(370): Chore service for: master/a3a61c9ba14f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:12:48,471 INFO [M:0;a3a61c9ba14f:46157 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:12:48,471 DEBUG [M:0;a3a61c9ba14f:46157 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:12:48,471 INFO [M:0;a3a61c9ba14f:46157 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:12:48,471 INFO [M:0;a3a61c9ba14f:46157 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:12:48,471 INFO [M:0;a3a61c9ba14f:46157 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:12:48,471 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T14:12:48,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:12:48,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:48,471 DEBUG [M:0;a3a61c9ba14f:46157 {}] zookeeper.ZKUtil(347): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:12:48,471 WARN [M:0;a3a61c9ba14f:46157 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:12:48,472 INFO [M:0;a3a61c9ba14f:46157 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/.lastflushedseqids 2024-12-02T14:12:48,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741883_1059 (size=228) 2024-12-02T14:12:48,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741883_1059 (size=228) 2024-12-02T14:12:48,476 INFO [M:0;a3a61c9ba14f:46157 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:12:48,476 INFO [M:0;a3a61c9ba14f:46157 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:12:48,477 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:12:48,477 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:48,477 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:48,477 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:12:48,477 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:48,477 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-12-02T14:12:48,490 DEBUG [M:0;a3a61c9ba14f:46157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/52dbd7c3bb81469e97cae2fec36abd20 is 82, key is hbase:meta,,1/info:regioninfo/1733148731295/Put/seqid=0 2024-12-02T14:12:48,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741884_1060 (size=5672) 2024-12-02T14:12:48,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741884_1060 (size=5672) 2024-12-02T14:12:48,495 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/52dbd7c3bb81469e97cae2fec36abd20 2024-12-02T14:12:48,511 DEBUG [M:0;a3a61c9ba14f:46157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c458d5ffb9934aa7be931fabb8d1f9f5 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733148731767/Put/seqid=0 2024-12-02T14:12:48,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741885_1061 (size=7090) 2024-12-02T14:12:48,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741885_1061 (size=7090) 2024-12-02T14:12:48,516 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c458d5ffb9934aa7be931fabb8d1f9f5 2024-12-02T14:12:48,520 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c458d5ffb9934aa7be931fabb8d1f9f5 2024-12-02T14:12:48,533 DEBUG [M:0;a3a61c9ba14f:46157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10fe8ec5249b4788a469dbdd38b5bc9f is 69, key is a3a61c9ba14f,43655,1733148730445/rs:state/1733148730696/Put/seqid=0 2024-12-02T14:12:48,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741886_1062 (size=5156) 2024-12-02T14:12:48,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741886_1062 (size=5156) 2024-12-02T14:12:48,537 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10fe8ec5249b4788a469dbdd38b5bc9f 2024-12-02T14:12:48,556 DEBUG [M:0;a3a61c9ba14f:46157 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95f82353c49a4896b5897cf171e3989e is 52, key is load_balancer_on/state:d/1733148731388/Put/seqid=0 2024-12-02T14:12:48,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741887_1063 (size=5056) 2024-12-02T14:12:48,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741887_1063 (size=5056) 2024-12-02T14:12:48,560 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95f82353c49a4896b5897cf171e3989e 2024-12-02T14:12:48,564 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/52dbd7c3bb81469e97cae2fec36abd20 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/52dbd7c3bb81469e97cae2fec36abd20 2024-12-02T14:12:48,567 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/52dbd7c3bb81469e97cae2fec36abd20, entries=8, sequenceid=125, filesize=5.5 K 2024-12-02T14:12:48,568 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c458d5ffb9934aa7be931fabb8d1f9f5 as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c458d5ffb9934aa7be931fabb8d1f9f5 2024-12-02T14:12:48,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-12-02T14:12:48,569 INFO [RS:0;a3a61c9ba14f:43655 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:12:48,569 INFO [RS:0;a3a61c9ba14f:43655 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,43655,1733148730445; zookeeper connection closed. 2024-12-02T14:12:48,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43655-0x1009b4640840001, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:48,570 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3450b3ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3450b3ca 2024-12-02T14:12:48,570 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T14:12:48,573 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c458d5ffb9934aa7be931fabb8d1f9f5 2024-12-02T14:12:48,573 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c458d5ffb9934aa7be931fabb8d1f9f5, entries=13, sequenceid=125, filesize=6.9 K 2024-12-02T14:12:48,574 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/10fe8ec5249b4788a469dbdd38b5bc9f as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/10fe8ec5249b4788a469dbdd38b5bc9f 2024-12-02T14:12:48,579 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/10fe8ec5249b4788a469dbdd38b5bc9f, entries=1, sequenceid=125, filesize=5.0 K 2024-12-02T14:12:48,579 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/95f82353c49a4896b5897cf171e3989e as hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95f82353c49a4896b5897cf171e3989e 2024-12-02T14:12:48,584 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37165/user/jenkins/test-data/528d1c29-1f7c-c004-9992-df5b9231151f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/95f82353c49a4896b5897cf171e3989e, entries=1, sequenceid=125, filesize=4.9 K 2024-12-02T14:12:48,585 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=125, compaction requested=false 2024-12-02T14:12:48,587 INFO [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T14:12:48,587 DEBUG [M:0;a3a61c9ba14f:46157 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148768477Disabling compacts and flushes for region at 1733148768477Disabling writes for close at 1733148768477Obtaining lock to block concurrent updates at 1733148768477Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733148768477Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1733148768477Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733148768478 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733148768478Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733148768490 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733148768490Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733148768498 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733148768511 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733148768511Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733148768520 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733148768533 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733148768533Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733148768541 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733148768555 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733148768555Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@324ab027: reopening flushed file at 1733148768563 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b6e84d9: reopening flushed file at 1733148768567 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d03972b: reopening flushed file at 1733148768573 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dadf7d7: reopening flushed file at 1733148768579 (+6 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 108ms, sequenceid=125, compaction requested=false at 1733148768585 (+6 ms)Writing region close event to WAL at 1733148768586 (+1 ms)Closed at 1733148768586 2024-12-02T14:12:48,587 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,587 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,587 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,587 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,587 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:48,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33913 is added to blk_1073741830_1006 (size=61332) 2024-12-02T14:12:48,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37799 is added to blk_1073741830_1006 (size=61332) 2024-12-02T14:12:48,590 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T14:12:48,590 INFO [M:0;a3a61c9ba14f:46157 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T14:12:48,590 INFO [M:0;a3a61c9ba14f:46157 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46157 2024-12-02T14:12:48,590 INFO [M:0;a3a61c9ba14f:46157 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T14:12:48,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:48,691 INFO [M:0;a3a61c9ba14f:46157 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:12:48,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46157-0x1009b4640840000, quorum=127.0.0.1:55059, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:48,701 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d31ee43{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:48,701 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4676912c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:48,701 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:48,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43909889{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:48,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51585bde{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:48,706 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:12:48,706 WARN [BP-1782339526-172.17.0.2-1733148729772 heartbeating to localhost/127.0.0.1:37165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:12:48,706 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:12:48,706 WARN [BP-1782339526-172.17.0.2-1733148729772 heartbeating to localhost/127.0.0.1:37165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1782339526-172.17.0.2-1733148729772 (Datanode Uuid e03d48c0-2b2e-46d0-8ed2-f81fd33b3399) service to localhost/127.0.0.1:37165 2024-12-02T14:12:48,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data3/current/BP-1782339526-172.17.0.2-1733148729772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:48,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data4/current/BP-1782339526-172.17.0.2-1733148729772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:48,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:12:48,712 INFO [regionserver/a3a61c9ba14f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:12:48,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ce0132a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:48,714 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f19e3c1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:48,715 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:48,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cb7e6ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:48,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@459363d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:48,716 WARN [BP-1782339526-172.17.0.2-1733148729772 heartbeating to localhost/127.0.0.1:37165 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:12:48,716 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:12:48,716 WARN [BP-1782339526-172.17.0.2-1733148729772 heartbeating to localhost/127.0.0.1:37165 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1782339526-172.17.0.2-1733148729772 (Datanode Uuid 36a111aa-de33-41b0-9b6d-79fb485abcca) service to localhost/127.0.0.1:37165 2024-12-02T14:12:48,716 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:12:48,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data1/current/BP-1782339526-172.17.0.2-1733148729772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:48,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/cluster_7c29f41e-3a16-df41-7e87-cc8256159a84/data/data2/current/BP-1782339526-172.17.0.2-1733148729772 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:48,717 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:12:48,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69826858{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:12:48,725 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7faa894d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:48,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:48,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11e6950{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:48,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78d2a49d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:48,733 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T14:12:48,774 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T14:12:48,788 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 205) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37165 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37165 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:37165 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37165 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37165 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37165 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37165 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37165 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 486) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=97 (was 132), ProcessCount=11 (was 11), AvailableMemoryMB=6126 (was 5777) - AvailableMemoryMB LEAK? 
- 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=97, ProcessCount=11, AvailableMemoryMB=6125 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.log.dir so I do NOT create it in target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/851eedbb-9a2a-0901-ef18-45f0e0bfbbc3/hadoop.tmp.dir so I do NOT create it in target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688, deleteOnExit=true 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/test.cache.data in system properties and HBase conf 2024-12-02T14:12:48,798 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.log.dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T14:12:48,799 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/nfs.dump.dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/java.io.tmpdir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T14:12:48,799 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T14:12:48,800 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T14:12:48,811 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:12:48,856 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:48,860 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:12:48,861 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:12:48,861 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:12:48,861 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:12:48,862 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:48,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d118eec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:12:48,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5031c9de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:12:48,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3885c0c5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/java.io.tmpdir/jetty-localhost-40877-hadoop-hdfs-3_4_1-tests_jar-_-any-9862597595094289632/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:12:48,960 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2394ff19{HTTP/1.1, (http/1.1)}{localhost:40877} 2024-12-02T14:12:48,960 INFO [Time-limited test {}] server.Server(415): Started @278622ms 2024-12-02T14:12:48,975 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-02T14:12:48,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:48,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:49,068 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:49,071 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:12:49,077 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:12:49,077 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:12:49,077 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:12:49,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40f3733a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:12:49,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6162294b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:12:49,196 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2482618b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/java.io.tmpdir/jetty-localhost-35691-hadoop-hdfs-3_4_1-tests_jar-_-any-7277194182224187458/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:49,196 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56836339{HTTP/1.1, (http/1.1)}{localhost:35691} 2024-12-02T14:12:49,196 INFO [Time-limited test {}] server.Server(415): Started @278859ms 2024-12-02T14:12:49,197 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T14:12:49,252 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T14:12:49,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:49,263 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T14:12:49,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T14:12:49,273 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T14:12:49,273 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T14:12:49,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e9b8f9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.log.dir/,AVAILABLE} 2024-12-02T14:12:49,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47b9368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T14:12:49,281 WARN [Thread-2493 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data1/current/BP-480779925-172.17.0.2-1733148768815/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:49,284 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data2/current/BP-480779925-172.17.0.2-1733148768815/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:49,310 WARN [Thread-2472 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:12:49,315 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6cc2401364fe8a1 with lease ID 0x1993dd00447f924e: Processing first storage report for DS-9373bcf9-b147-4e45-a72e-838e7e9df9c5 from datanode DatanodeRegistration(127.0.0.1:44621, datanodeUuid=4561330a-38fa-4adb-8a7a-6207e7654803, infoPort=36853, infoSecurePort=0, ipcPort=37781, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815) 2024-12-02T14:12:49,315 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6cc2401364fe8a1 with lease ID 0x1993dd00447f924e: from storage DS-9373bcf9-b147-4e45-a72e-838e7e9df9c5 node DatanodeRegistration(127.0.0.1:44621, datanodeUuid=4561330a-38fa-4adb-8a7a-6207e7654803, infoPort=36853, infoSecurePort=0, ipcPort=37781, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:12:49,315 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6cc2401364fe8a1 with lease ID 0x1993dd00447f924e: Processing first storage report for DS-137e4653-fe5f-41da-ac1a-e9fa495a6c7f from datanode DatanodeRegistration(127.0.0.1:44621, datanodeUuid=4561330a-38fa-4adb-8a7a-6207e7654803, infoPort=36853, infoSecurePort=0, ipcPort=37781, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815) 2024-12-02T14:12:49,315 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6cc2401364fe8a1 with lease ID 0x1993dd00447f924e: from storage DS-137e4653-fe5f-41da-ac1a-e9fa495a6c7f node DatanodeRegistration(127.0.0.1:44621, datanodeUuid=4561330a-38fa-4adb-8a7a-6207e7654803, infoPort=36853, infoSecurePort=0, ipcPort=37781, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:12:49,388 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@663b7fb1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/java.io.tmpdir/jetty-localhost-34541-hadoop-hdfs-3_4_1-tests_jar-_-any-10606926889454159955/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:49,389 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@644054b{HTTP/1.1, (http/1.1)}{localhost:34541} 2024-12-02T14:12:49,389 INFO [Time-limited test {}] server.Server(415): Started @279051ms 2024-12-02T14:12:49,390 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-02T14:12:49,440 WARN [Thread-2519 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data3/current/BP-480779925-172.17.0.2-1733148768815/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:49,441 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data4/current/BP-480779925-172.17.0.2-1733148768815/current, will proceed with Du for space computation calculation, 2024-12-02T14:12:49,457 WARN [Thread-2508 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T14:12:49,459 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x88e2fe722a79d184 with lease ID 0x1993dd00447f924f: Processing first storage report for DS-3dc3cfab-5ca5-4a50-b7e6-2cd7d01954a3 from datanode DatanodeRegistration(127.0.0.1:41803, datanodeUuid=739f7589-b5b1-4cfb-9028-2b490219cf81, infoPort=35991, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815) 2024-12-02T14:12:49,459 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x88e2fe722a79d184 with lease ID 0x1993dd00447f924f: from storage DS-3dc3cfab-5ca5-4a50-b7e6-2cd7d01954a3 node DatanodeRegistration(127.0.0.1:41803, datanodeUuid=739f7589-b5b1-4cfb-9028-2b490219cf81, infoPort=35991, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:12:49,459 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x88e2fe722a79d184 with lease ID 0x1993dd00447f924f: Processing first storage report for DS-32a9b79d-6cbe-4cd6-9564-55cee71dcf03 from datanode DatanodeRegistration(127.0.0.1:41803, datanodeUuid=739f7589-b5b1-4cfb-9028-2b490219cf81, infoPort=35991, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815) 2024-12-02T14:12:49,459 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x88e2fe722a79d184 with lease ID 0x1993dd00447f924f: from storage DS-32a9b79d-6cbe-4cd6-9564-55cee71dcf03 node DatanodeRegistration(127.0.0.1:41803, datanodeUuid=739f7589-b5b1-4cfb-9028-2b490219cf81, infoPort=35991, infoSecurePort=0, ipcPort=45307, storageInfo=lv=-57;cid=testClusterID;nsid=1914490643;c=1733148768815), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T14:12:49,511 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1 2024-12-02T14:12:49,515 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/zookeeper_0, clientPort=59622, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T14:12:49,516 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59622 2024-12-02T14:12:49,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:49,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:49,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:12:49,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741825_1001 (size=7) 2024-12-02T14:12:49,550 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6 with version=8 2024-12-02T14:12:49,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42389/user/jenkins/test-data/efa7b9a1-cce7-8bb6-6457-9b4bead6bec1/hbase-staging 2024-12-02T14:12:49,553 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:12:49,553 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:49,553 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:49,553 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:12:49,553 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:49,553 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:12:49,553 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T14:12:49,553 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:12:49,554 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44757 2024-12-02T14:12:49,555 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44757 connecting to ZooKeeper ensemble=127.0.0.1:59622 2024-12-02T14:12:49,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:447570x0, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:12:49,562 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44757-0x1009b46d9580000 connected 2024-12-02T14:12:49,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:49,581 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:49,583 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:12:49,584 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6, hbase.cluster.distributed=false 2024-12-02T14:12:49,585 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:12:49,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44757 2024-12-02T14:12:49,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44757 2024-12-02T14:12:49,586 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44757 2024-12-02T14:12:49,587 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44757 2024-12-02T14:12:49,587 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44757 2024-12-02T14:12:49,607 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a3a61c9ba14f:0 server-side Connection retries=45 2024-12-02T14:12:49,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:49,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:49,608 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T14:12:49,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T14:12:49,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T14:12:49,608 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T14:12:49,608 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T14:12:49,609 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46109 2024-12-02T14:12:49,611 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46109 connecting to ZooKeeper ensemble=127.0.0.1:59622 2024-12-02T14:12:49,611 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:49,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:49,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461090x0, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T14:12:49,618 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46109-0x1009b46d9580001 connected 2024-12-02T14:12:49,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T14:12:49,619 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T14:12:49,621 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T14:12:49,621 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T14:12:49,622 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T14:12:49,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46109 2024-12-02T14:12:49,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46109 2024-12-02T14:12:49,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46109 2024-12-02T14:12:49,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46109 2024-12-02T14:12:49,625 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46109 
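The ipc.RpcExecutor entries above record how each call queue is configured (queueClass=java.util.concurrent.LinkedBlockingQueue, numCallQueues, maxQueueLength=30, handlerCount=3). As a rough illustrative sketch only -- this is not HBase's RpcExecutor code, and the class and thread names below are invented -- a bounded FIFO call queue drained by a fixed pool of handler threads can look like this:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class FifoCallQueueSketch {
  public static void main(String[] args) throws InterruptedException {
    int maxQueueLength = 30;   // mirrors maxQueueLength=30 from the log above
    int handlerCount = 3;      // mirrors handlerCount=3 from the log above
    BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(maxQueueLength);

    // Handler threads block on the queue and run calls as they arrive.
    for (int i = 0; i < handlerCount; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run();
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }, "default.FPBQ.Fifo.handler-" + i);
      handler.setDaemon(true);
      handler.start();
    }

    // offer() fails fast once the queue already holds maxQueueLength pending calls,
    // which is the back-pressure behaviour a bounded call queue provides.
    boolean accepted = callQueue.offer(() -> System.out.println("handled call"));
    System.out.println("call accepted: " + accepted);
    Thread.sleep(100);
  }
}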
2024-12-02T14:12:49,635 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a3a61c9ba14f:44757 2024-12-02T14:12:49,636 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:49,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:49,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:49,637 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:49,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T14:12:49,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,638 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T14:12:49,639 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a3a61c9ba14f,44757,1733148769553 from backup master directory 2024-12-02T14:12:49,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:49,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:49,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T14:12:49,639 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
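The zookeeper.ZKUtil entries above repeatedly note "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master, and the ZKWatcher events then show NodeCreated and NodeDeleted notifications arriving for those paths. A minimal sketch of that watch-before-create pattern with the plain ZooKeeper client is below; it assumes the ensemble at 127.0.0.1:59622 from the log is reachable, and the class name is illustrative:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event: " + event.getType() + " on " + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59622", 30000, watcher);
    // exists() registers the watch even when the node is absent, so a later
    // NodeCreated event fires once another process creates /hbase/running.
    Stat stat = zk.exists("/hbase/running", true);
    System.out.println("/hbase/running " + (stat == null ? "absent, watch set" : "present"));
    zk.close();
  }
}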
2024-12-02T14:12:49,639 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:49,642 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/hbase.id] with ID: 7fbc82da-b791-45a6-be1c-5870af68289d 2024-12-02T14:12:49,642 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/.tmp/hbase.id 2024-12-02T14:12:49,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:12:49,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741826_1002 (size=42) 2024-12-02T14:12:49,651 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/.tmp/hbase.id]:[hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/hbase.id] 2024-12-02T14:12:49,660 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:49,660 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T14:12:49,661 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
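The util.FSUtils entries above write the cluster ID to a temporary location and then move it to its final hbase.id path, so readers never observe a partially written file. A small sketch of that write-then-rename pattern against the Hadoop FileSystem API is shown below, assuming a local filesystem and illustrative paths; the helper and class names are not from HBase:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path target = new Path(rootDir, "hbase.id");
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    // Write the ID to a temporary location first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // ...then rename it into place so the final path only ever holds a complete file.
    if (!fs.rename(tmp, target)) {
      throw new IOException("rename " + tmp + " -> " + target + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    writeClusterId(fs, new Path("/tmp/hbase-id-sketch"), "7fbc82da-b791-45a6-be1c-5870af68289d");
  }
}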
2024-12-02T14:12:49,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:12:49,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741827_1003 (size=196) 2024-12-02T14:12:49,671 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T14:12:49,672 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T14:12:49,672 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:12:49,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:12:49,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741828_1004 (size=1189) 2024-12-02T14:12:49,679 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store 2024-12-02T14:12:49,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:12:49,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741829_1005 (size=34) 2024-12-02T14:12:49,685 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:49,685 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:12:49,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:49,685 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:49,685 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:12:49,685 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:49,685 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T14:12:49,685 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148769685Disabling compacts and flushes for region at 1733148769685Disabling writes for close at 1733148769685Writing region close event to WAL at 1733148769685Closed at 1733148769685 2024-12-02T14:12:49,686 WARN [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/.initializing 2024-12-02T14:12:49,686 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/WALs/a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:49,688 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C44757%2C1733148769553, suffix=, logDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/WALs/a3a61c9ba14f,44757,1733148769553, archiveDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/oldWALs, maxLogs=10 2024-12-02T14:12:49,689 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C44757%2C1733148769553.1733148769688 2024-12-02T14:12:49,693 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/WALs/a3a61c9ba14f,44757,1733148769553/a3a61c9ba14f%2C44757%2C1733148769553.1733148769688 2024-12-02T14:12:49,698 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35991:35991),(127.0.0.1/127.0.0.1:36853:36853)] 2024-12-02T14:12:49,700 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:12:49,700 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:49,700 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,700 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,701 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T14:12:49,703 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:49,703 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T14:12:49,704 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:49,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,705 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T14:12:49,705 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:49,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T14:12:49,707 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T14:12:49,707 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,708 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,708 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,709 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,709 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,710 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T14:12:49,711 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T14:12:49,713 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:12:49,713 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803444, jitterRate=0.021633103489875793}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T14:12:49,714 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733148769700Initializing all the Stores at 1733148769701 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148769701Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148769701Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148769701Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148769701Cleaning up temporary data from old regions at 1733148769709 (+8 ms)Region opened successfully at 1733148769714 (+5 ms) 2024-12-02T14:12:49,714 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T14:12:49,717 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@455c6988, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:12:49,718 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-02T14:12:49,718 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T14:12:49,718 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T14:12:49,718 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T14:12:49,718 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-02T14:12:49,719 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-02T14:12:49,719 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T14:12:49,726 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T14:12:49,727 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T14:12:49,727 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T14:12:49,728 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T14:12:49,728 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T14:12:49,729 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T14:12:49,729 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T14:12:49,730 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T14:12:49,730 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T14:12:49,731 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T14:12:49,732 DEBUG 
[master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T14:12:49,734 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T14:12:49,735 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T14:12:49,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:49,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:49,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,736 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a3a61c9ba14f,44757,1733148769553, sessionid=0x1009b46d9580000, setting cluster-up flag (Was=false) 2024-12-02T14:12:49,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,741 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T14:12:49,742 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:49,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:49,746 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T14:12:49,747 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:49,749 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T14:12:49,750 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:49,750 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T14:12:49,750 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T14:12:49,751 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a3a61c9ba14f,44757,1733148769553 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=5, maxPoolSize=5 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a3a61c9ba14f:0, corePoolSize=10, maxPoolSize=10 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:12:49,752 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a3a61c9ba14f:0, corePoolSize=1, 
maxPoolSize=1 2024-12-02T14:12:49,754 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733148799754 2024-12-02T14:12:49,754 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:49,754 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T14:12:49,754 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T14:12:49,754 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T14:12:49,754 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T14:12:49,754 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T14:12:49,754 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T14:12:49,754 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T14:12:49,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T14:12:49,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T14:12:49,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T14:12:49,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T14:12:49,755 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T14:12:49,755 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T14:12:49,755 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T14:12:49,755 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148769755,5,FailOnTimeoutGroup] 2024-12-02T14:12:49,755 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148769755,5,FailOnTimeoutGroup] 2024-12-02T14:12:49,756 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,756 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T14:12:49,756 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,756 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:12:49,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741831_1007 (size=1321) 2024-12-02T14:12:49,762 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T14:12:49,763 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6 2024-12-02T14:12:49,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:12:49,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741832_1008 (size=32) 2024-12-02T14:12:49,768 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:49,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:12:49,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:12:49,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:49,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:12:49,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:12:49,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:49,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:12:49,774 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:12:49,774 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:49,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:12:49,775 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:12:49,775 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:49,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:49,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:12:49,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740 2024-12-02T14:12:49,776 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740 2024-12-02T14:12:49,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:12:49,777 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:12:49,778 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:12:49,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:12:49,781 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T14:12:49,781 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846784, jitterRate=0.07674175500869751}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:12:49,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733148769769Initializing all the Stores at 1733148769769Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148769769Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148769769Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148769769Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148769769Cleaning up temporary data from old regions at 1733148769777 (+8 ms)Region opened successfully at 1733148769782 (+5 ms) 2024-12-02T14:12:49,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:12:49,782 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:12:49,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:12:49,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:12:49,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:12:49,782 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:49,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148769782Disabling compacts and flushes for region at 
1733148769782Disabling writes for close at 1733148769782Writing region close event to WAL at 1733148769782Closed at 1733148769782 2024-12-02T14:12:49,783 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:49,783 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T14:12:49,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T14:12:49,784 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:12:49,785 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T14:12:49,828 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(746): ClusterId : 7fbc82da-b791-45a6-be1c-5870af68289d 2024-12-02T14:12:49,828 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T14:12:49,830 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T14:12:49,830 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T14:12:49,833 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T14:12:49,833 DEBUG [RS:0;a3a61c9ba14f:46109 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73dadff0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a3a61c9ba14f/172.17.0.2:0 2024-12-02T14:12:49,846 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a3a61c9ba14f:46109 2024-12-02T14:12:49,847 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T14:12:49,847 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T14:12:49,847 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T14:12:49,847 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(2659): reportForDuty to master=a3a61c9ba14f,44757,1733148769553 with port=46109, startcode=1733148769607 2024-12-02T14:12:49,847 DEBUG [RS:0;a3a61c9ba14f:46109 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T14:12:49,849 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43617, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T14:12:49,850 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44757 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:49,850 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44757 {}] master.ServerManager(517): Registering regionserver=a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:49,851 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6 2024-12-02T14:12:49,851 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39117 2024-12-02T14:12:49,851 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T14:12:49,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T14:12:49,852 DEBUG [RS:0;a3a61c9ba14f:46109 {}] zookeeper.ZKUtil(111): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:49,852 WARN [RS:0;a3a61c9ba14f:46109 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T14:12:49,853 INFO [RS:0;a3a61c9ba14f:46109 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:12:49,853 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:49,853 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a3a61c9ba14f,46109,1733148769607] 2024-12-02T14:12:49,856 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T14:12:49,857 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T14:12:49,857 INFO [RS:0;a3a61c9ba14f:46109 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T14:12:49,857 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T14:12:49,857 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T14:12:49,858 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T14:12:49,858 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=2, maxPoolSize=2 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,858 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,859 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,859 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,859 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,859 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a3a61c9ba14f:0, corePoolSize=1, maxPoolSize=1 2024-12-02T14:12:49,859 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:12:49,859 DEBUG [RS:0;a3a61c9ba14f:46109 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a3a61c9ba14f:0, corePoolSize=3, maxPoolSize=3 2024-12-02T14:12:49,859 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-02T14:12:49,859 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,859 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,859 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,859 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,859 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46109,1733148769607-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T14:12:49,871 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T14:12:49,871 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,46109,1733148769607-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,871 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,871 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.Replication(171): a3a61c9ba14f,46109,1733148769607 started 2024-12-02T14:12:49,882 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:49,882 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1482): Serving as a3a61c9ba14f,46109,1733148769607, RpcServer on a3a61c9ba14f/172.17.0.2:46109, sessionid=0x1009b46d9580001 2024-12-02T14:12:49,882 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T14:12:49,882 DEBUG [RS:0;a3a61c9ba14f:46109 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,46109,1733148769607' 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a3a61c9ba14f,46109,1733148769607' 2024-12-02T14:12:49,883 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T14:12:49,884 DEBUG 
[RS:0;a3a61c9ba14f:46109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T14:12:49,884 DEBUG [RS:0;a3a61c9ba14f:46109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T14:12:49,884 INFO [RS:0;a3a61c9ba14f:46109 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T14:12:49,884 INFO [RS:0;a3a61c9ba14f:46109 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T14:12:49,935 WARN [a3a61c9ba14f:44757 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T14:12:49,986 INFO [RS:0;a3a61c9ba14f:46109 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C46109%2C1733148769607, suffix=, logDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/a3a61c9ba14f,46109,1733148769607, archiveDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/oldWALs, maxLogs=32 2024-12-02T14:12:49,987 INFO [RS:0;a3a61c9ba14f:46109 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C46109%2C1733148769607.1733148769986 2024-12-02T14:12:49,991 INFO [RS:0;a3a61c9ba14f:46109 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/a3a61c9ba14f,46109,1733148769607/a3a61c9ba14f%2C46109%2C1733148769607.1733148769986 2024-12-02T14:12:49,992 DEBUG [RS:0;a3a61c9ba14f:46109 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35991:35991),(127.0.0.1/127.0.0.1:36853:36853)] 2024-12-02T14:12:49,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,32833,1733148592935/a3a61c9ba14f%2C32833%2C1733148592935.meta.1733148593746.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:49,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/WALs/a3a61c9ba14f,36269,1733148593887/a3a61c9ba14f%2C36269%2C1733148593887.1733148594073 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-02T14:12:50,185 DEBUG [a3a61c9ba14f:44757 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-02T14:12:50,186 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:50,187 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,46109,1733148769607, state=OPENING 2024-12-02T14:12:50,188 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T14:12:50,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:50,189 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:50,190 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:50,190 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:50,190 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T14:12:50,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,46109,1733148769607}] 2024-12-02T14:12:50,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:33497/user/jenkins/test-data/25c8e370-7b38-586e-74ce-45b1b62ffb7b/MasterData/WALs/a3a61c9ba14f,37979,1733148592890/a3a61c9ba14f%2C37979%2C1733148592890.1733148593043 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-02T14:12:50,343 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T14:12:50,346 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37853, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T14:12:50,353 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T14:12:50,353 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:12:50,356 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a3a61c9ba14f%2C46109%2C1733148769607.meta, suffix=.meta, logDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/a3a61c9ba14f,46109,1733148769607, archiveDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/oldWALs, maxLogs=32 2024-12-02T14:12:50,356 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a3a61c9ba14f%2C46109%2C1733148769607.meta.1733148770356.meta 2024-12-02T14:12:50,363 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/a3a61c9ba14f,46109,1733148769607/a3a61c9ba14f%2C46109%2C1733148769607.meta.1733148770356.meta 2024-12-02T14:12:50,365 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35991:35991),(127.0.0.1/127.0.0.1:36853:36853)] 2024-12-02T14:12:50,366 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T14:12:50,366 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T14:12:50,366 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 
service=MultiRowMutationService 2024-12-02T14:12:50,367 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-02T14:12:50,367 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T14:12:50,367 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T14:12:50,367 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T14:12:50,367 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T14:12:50,368 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T14:12:50,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T14:12:50,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:50,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:50,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T14:12:50,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T14:12:50,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:50,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:50,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T14:12:50,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T14:12:50,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:50,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:50,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T14:12:50,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T14:12:50,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T14:12:50,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T14:12:50,372 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T14:12:50,373 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740 2024-12-02T14:12:50,374 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740 2024-12-02T14:12:50,375 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T14:12:50,375 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T14:12:50,375 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-02T14:12:50,377 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T14:12:50,377 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864066, jitterRate=0.09871770441532135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-02T14:12:50,378 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T14:12:50,378 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733148770367Writing region info on filesystem at 1733148770367Initializing all the Stores at 1733148770368 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148770368Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1733148770368Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733148770368Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733148770368Cleaning up temporary data from old regions at 1733148770375 (+7 ms)Running coprocessor post-open hooks at 1733148770378 (+3 ms)Region opened successfully at 1733148770378 2024-12-02T14:12:50,379 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733148770343 2024-12-02T14:12:50,381 DEBUG [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T14:12:50,381 INFO [RS_OPEN_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T14:12:50,382 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:50,383 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a3a61c9ba14f,46109,1733148769607, state=OPEN 2024-12-02T14:12:50,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:12:50,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T14:12:50,385 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,46109,1733148769607 2024-12-02T14:12:50,385 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:50,385 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T14:12:50,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T14:12:50,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a3a61c9ba14f,46109,1733148769607 in 195 msec 2024-12-02T14:12:50,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, 
resume processing ppid=1 2024-12-02T14:12:50,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 606 msec 2024-12-02T14:12:50,393 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T14:12:50,393 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T14:12:50,394 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:12:50,394 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,46109,1733148769607, seqNum=-1] 2024-12-02T14:12:50,395 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:12:50,396 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47889, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:12:50,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 651 msec 2024-12-02T14:12:50,402 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733148770402, completionTime=-1 2024-12-02T14:12:50,402 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-02T14:12:50,402 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733148830404 2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733148890404 2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,44757,1733148769553-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,44757,1733148769553-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,44757,1733148769553-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a3a61c9ba14f:44757, period=300000, unit=MILLISECONDS is enabled.
2024-12-02T14:12:50,404 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-02T14:12:50,405 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-12-02T14:12:50,406 DEBUG [master/a3a61c9ba14f:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.769sec
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,44757,1733148769553-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-02T14:12:50,408 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,44757,1733148769553-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-02T14:12:50,410 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-02T14:12:50,410 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-02T14:12:50,410 INFO [master/a3a61c9ba14f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a3a61c9ba14f,44757,1733148769553-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-02T14:12:50,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c9b811e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:12:50,428 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a3a61c9ba14f,44757,-1 for getting cluster id 2024-12-02T14:12:50,428 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T14:12:50,429 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7fbc82da-b791-45a6-be1c-5870af68289d' 2024-12-02T14:12:50,429 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T14:12:50,429 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7fbc82da-b791-45a6-be1c-5870af68289d" 2024-12-02T14:12:50,429 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be746b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:12:50,430 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a3a61c9ba14f,44757,-1] 2024-12-02T14:12:50,430 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T14:12:50,430 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:50,431 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T14:12:50,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11ec225b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T14:12:50,432 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T14:12:50,433 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a3a61c9ba14f,46109,1733148769607, seqNum=-1] 2024-12-02T14:12:50,433 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T14:12:50,434 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34510, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T14:12:50,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:50,436 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T14:12:50,439 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-02T14:12:50,439 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-02T14:12:50,442 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/test.com,8080,1, archiveDir=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/oldWALs, maxLogs=32 2024-12-02T14:12:50,442 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733148770442 2024-12-02T14:12:50,448 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733148770442 2024-12-02T14:12:50,449 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36853:36853),(127.0.0.1/127.0.0.1:35991:35991)] 2024-12-02T14:12:50,450 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733148770450 2024-12-02T14:12:50,456 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,456 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,456 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,456 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,457 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733148770442 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733148770450 2024-12-02T14:12:50,458 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36853:36853),(127.0.0.1/127.0.0.1:35991:35991)] 2024-12-02T14:12:50,458 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733148770442 is not closed yet, will try archiving it next time 2024-12-02T14:12:50,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741835_1011 (size=93) 2024-12-02T14:12:50,459 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,459 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,459 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,459 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,459 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741835_1011 (size=93) 2024-12-02T14:12:50,460 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/WALs/test.com,8080,1/test.com%2C8080%2C1.1733148770442 to hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/oldWALs/test.com%2C8080%2C1.1733148770442 2024-12-02T14:12:50,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741836_1012 (size=93) 2024-12-02T14:12:50,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741836_1012 (size=93) 2024-12-02T14:12:50,464 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/oldWALs 2024-12-02T14:12:50,465 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733148770450) 2024-12-02T14:12:50,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T14:12:50,465 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-02T14:12:50,465 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T14:12:50,465 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:50,465 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:50,465 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T14:12:50,465 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T14:12:50,465 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2146655676, stopped=false 2024-12-02T14:12:50,466 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a3a61c9ba14f,44757,1733148769553 2024-12-02T14:12:50,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:50,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T14:12:50,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:50,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:50,467 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:12:50,467 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T14:12:50,467 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-02T14:12:50,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:12:50,467 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a3a61c9ba14f,46109,1733148769607' *****
2024-12-02T14:12:50,467 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-02T14:12:50,467 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-02T14:12:50,467 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-02T14:12:50,468 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(959): stopping server a3a61c9ba14f,46109,1733148769607
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a3a61c9ba14f:46109.
2024-12-02T14:12:50,468 DEBUG [RS:0;a3a61c9ba14f:46109 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-02T14:12:50,468 DEBUG [RS:0;a3a61c9ba14f:46109 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-02T14:12:50,468 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-02T14:12:50,469 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T14:12:50,469 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T14:12:50,469 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-02T14:12:50,469 DEBUG [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-02T14:12:50,469 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T14:12:50,469 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T14:12:50,469 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T14:12:50,469 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T14:12:50,469 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T14:12:50,469 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-02T14:12:50,483 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740/.tmp/ns/8e327572a33d4201971efdc4fbb536cb is 43, key is default/ns:d/1733148770397/Put/seqid=0 2024-12-02T14:12:50,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741837_1013 (size=5153) 2024-12-02T14:12:50,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741837_1013 (size=5153) 2024-12-02T14:12:50,487 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740/.tmp/ns/8e327572a33d4201971efdc4fbb536cb 2024-12-02T14:12:50,493 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740/.tmp/ns/8e327572a33d4201971efdc4fbb536cb as hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740/ns/8e327572a33d4201971efdc4fbb536cb 2024-12-02T14:12:50,497 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740/ns/8e327572a33d4201971efdc4fbb536cb, entries=2, sequenceid=6, filesize=5.0 K 2024-12-02T14:12:50,498 INFO 
[RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false 2024-12-02T14:12:50,501 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T14:12:50,502 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T14:12:50,502 INFO [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:50,502 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733148770469Running coprocessor pre-close hooks at 1733148770469Disabling compacts and flushes for region at 1733148770469Disabling writes for close at 1733148770469Obtaining lock to block concurrent updates at 1733148770469Preparing flush snapshotting stores in 1588230740 at 1733148770469Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733148770469Flushing stores of hbase:meta,,1.1588230740 at 1733148770470 (+1 ms)Flushing 1588230740/ns: creating writer at 1733148770470Flushing 1588230740/ns: appending metadata at 1733148770482 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733148770482Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45449768: reopening flushed file at 1733148770492 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 29ms, sequenceid=6, compaction requested=false at 1733148770498 (+6 ms)Writing region close event to WAL at 1733148770498Running coprocessor post-close hooks at 1733148770502 (+4 ms)Closed at 1733148770502 2024-12-02T14:12:50,502 DEBUG [RS_CLOSE_META-regionserver/a3a61c9ba14f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T14:12:50,669 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(976): stopping server a3a61c9ba14f,46109,1733148769607; all regions closed. 
2024-12-02T14:12:50,670 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,670 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,670 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,671 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,671 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741834_1010 (size=1152) 2024-12-02T14:12:50,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741834_1010 (size=1152) 2024-12-02T14:12:50,679 DEBUG [RS:0;a3a61c9ba14f:46109 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/oldWALs 2024-12-02T14:12:50,679 INFO [RS:0;a3a61c9ba14f:46109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C46109%2C1733148769607.meta:.meta(num 1733148770356) 2024-12-02T14:12:50,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,680 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,680 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741833_1009 (size=93) 2024-12-02T14:12:50,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741833_1009 (size=93) 2024-12-02T14:12:50,685 DEBUG [RS:0;a3a61c9ba14f:46109 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/oldWALs 2024-12-02T14:12:50,685 INFO [RS:0;a3a61c9ba14f:46109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a3a61c9ba14f%2C46109%2C1733148769607:(num 1733148769986) 2024-12-02T14:12:50,685 DEBUG [RS:0;a3a61c9ba14f:46109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T14:12:50,685 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T14:12:50,685 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T14:12:50,685 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.ChoreService(370): Chore service for: regionserver/a3a61c9ba14f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T14:12:50,685 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:12:50,685 INFO [regionserver/a3a61c9ba14f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T14:12:50,685 INFO [RS:0;a3a61c9ba14f:46109 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46109
2024-12-02T14:12:50,686 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-02T14:12:50,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-02T14:12:50,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a3a61c9ba14f,46109,1733148769607
2024-12-02T14:12:50,687 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a3a61c9ba14f,46109,1733148769607]
2024-12-02T14:12:50,688 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a3a61c9ba14f,46109,1733148769607 already deleted, retry=false
2024-12-02T14:12:50,688 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a3a61c9ba14f,46109,1733148769607 expired; onlineServers=0
2024-12-02T14:12:50,688 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a3a61c9ba14f,44757,1733148769553' *****
2024-12-02T14:12:50,688 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-02T14:12:50,688 INFO [M:0;a3a61c9ba14f:44757 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-02T14:12:50,688 INFO [M:0;a3a61c9ba14f:44757 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-02T14:12:50,688 DEBUG [M:0;a3a61c9ba14f:44757 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-02T14:12:50,688 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-02T14:12:50,688 DEBUG [M:0;a3a61c9ba14f:44757 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T14:12:50,688 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148769755 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.large.0-1733148769755,5,FailOnTimeoutGroup] 2024-12-02T14:12:50,688 DEBUG [master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148769755 {}] cleaner.HFileCleaner(306): Exit Thread[master/a3a61c9ba14f:0:becomeActiveMaster-HFileCleaner.small.0-1733148769755,5,FailOnTimeoutGroup] 2024-12-02T14:12:50,688 INFO [M:0;a3a61c9ba14f:44757 {}] hbase.ChoreService(370): Chore service for: master/a3a61c9ba14f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T14:12:50,688 INFO [M:0;a3a61c9ba14f:44757 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T14:12:50,688 DEBUG [M:0;a3a61c9ba14f:44757 {}] master.HMaster(1795): Stopping service threads 2024-12-02T14:12:50,688 INFO [M:0;a3a61c9ba14f:44757 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T14:12:50,688 INFO [M:0;a3a61c9ba14f:44757 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T14:12:50,689 INFO [M:0;a3a61c9ba14f:44757 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T14:12:50,689 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T14:12:50,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T14:12:50,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T14:12:50,689 DEBUG [M:0;a3a61c9ba14f:44757 {}] zookeeper.ZKUtil(347): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T14:12:50,689 WARN [M:0;a3a61c9ba14f:44757 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T14:12:50,689 INFO [M:0;a3a61c9ba14f:44757 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/.lastflushedseqids 2024-12-02T14:12:50,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741838_1014 (size=99) 2024-12-02T14:12:50,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741838_1014 (size=99) 2024-12-02T14:12:50,695 INFO [M:0;a3a61c9ba14f:44757 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T14:12:50,695 INFO [M:0;a3a61c9ba14f:44757 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T14:12:50,695 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T14:12:50,695 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:50,695 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:50,695 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T14:12:50,695 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T14:12:50,695 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-02T14:12:50,716 DEBUG [M:0;a3a61c9ba14f:44757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aba1c82f1e7f47a8ad2b0c5fff3033c8 is 82, key is hbase:meta,,1/info:regioninfo/1733148770382/Put/seqid=0 2024-12-02T14:12:50,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741839_1015 (size=5672) 2024-12-02T14:12:50,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741839_1015 (size=5672) 2024-12-02T14:12:50,720 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aba1c82f1e7f47a8ad2b0c5fff3033c8 2024-12-02T14:12:50,737 DEBUG [M:0;a3a61c9ba14f:44757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/454c85dded624f41b40dc353a8c7174e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733148770401/Put/seqid=0 2024-12-02T14:12:50,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741840_1016 (size=5275) 2024-12-02T14:12:50,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741840_1016 (size=5275) 2024-12-02T14:12:50,742 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/454c85dded624f41b40dc353a8c7174e 2024-12-02T14:12:50,757 DEBUG [M:0;a3a61c9ba14f:44757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22348dd6ac7a492e912f99a1fd9fc384 is 69, key is a3a61c9ba14f,46109,1733148769607/rs:state/1733148769850/Put/seqid=0 2024-12-02T14:12:50,761 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741841_1017 (size=5156) 2024-12-02T14:12:50,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741841_1017 (size=5156) 2024-12-02T14:12:50,761 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22348dd6ac7a492e912f99a1fd9fc384 2024-12-02T14:12:50,777 DEBUG [M:0;a3a61c9ba14f:44757 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/921ebfc9a71f471cb0ff8bf5ca0e593a is 52, key is load_balancer_on/state:d/1733148770438/Put/seqid=0 2024-12-02T14:12:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741842_1018 (size=5056) 2024-12-02T14:12:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741842_1018 (size=5056) 2024-12-02T14:12:50,782 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/921ebfc9a71f471cb0ff8bf5ca0e593a 2024-12-02T14:12:50,786 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aba1c82f1e7f47a8ad2b0c5fff3033c8 as hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aba1c82f1e7f47a8ad2b0c5fff3033c8 2024-12-02T14:12:50,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:50,787 INFO [RS:0;a3a61c9ba14f:46109 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T14:12:50,787 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46109-0x1009b46d9580001, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T14:12:50,787 INFO [RS:0;a3a61c9ba14f:46109 {}] regionserver.HRegionServer(1031): Exiting; stopping=a3a61c9ba14f,46109,1733148769607; zookeeper connection closed. 
2024-12-02T14:12:50,788 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a4a07f2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a4a07f2 2024-12-02T14:12:50,788 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-02T14:12:50,790 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aba1c82f1e7f47a8ad2b0c5fff3033c8, entries=8, sequenceid=29, filesize=5.5 K 2024-12-02T14:12:50,791 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/454c85dded624f41b40dc353a8c7174e as hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/454c85dded624f41b40dc353a8c7174e 2024-12-02T14:12:50,794 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/454c85dded624f41b40dc353a8c7174e, entries=3, sequenceid=29, filesize=5.2 K 2024-12-02T14:12:50,795 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/22348dd6ac7a492e912f99a1fd9fc384 as hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/22348dd6ac7a492e912f99a1fd9fc384 2024-12-02T14:12:50,799 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/22348dd6ac7a492e912f99a1fd9fc384, entries=1, sequenceid=29, filesize=5.0 K 2024-12-02T14:12:50,800 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/921ebfc9a71f471cb0ff8bf5ca0e593a as hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/921ebfc9a71f471cb0ff8bf5ca0e593a 2024-12-02T14:12:50,805 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39117/user/jenkins/test-data/c362f4a4-66e3-8b03-5177-472aa91117f6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/921ebfc9a71f471cb0ff8bf5ca0e593a, entries=1, sequenceid=29, filesize=4.9 K 2024-12-02T14:12:50,806 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false 2024-12-02T14:12:50,807 INFO [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T14:12:50,808 DEBUG [M:0;a3a61c9ba14f:44757 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733148770695Disabling compacts and flushes for region at 1733148770695Disabling writes for close at 1733148770695Obtaining lock to block concurrent updates at 1733148770695Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733148770695Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733148770696 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733148770696Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733148770696Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733148770715 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733148770715Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733148770724 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733148770737 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733148770737Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733148770745 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733148770757 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733148770757Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733148770765 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733148770776 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733148770777 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67110b85: reopening flushed file at 1733148770785 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23796c76: reopening flushed file at 1733148770790 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d9ca5d1: reopening flushed file at 1733148770794 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@759bfc6e: reopening flushed file at 1733148770799 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false at 1733148770806 (+7 ms)Writing region close event to WAL at 1733148770807 (+1 ms)Closed at 1733148770807 2024-12-02T14:12:50,808 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,808 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,808 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,808 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,808 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-02T14:12:50,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44621 is added to blk_1073741830_1006 (size=10311) 2024-12-02T14:12:50,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41803 is added to blk_1073741830_1006 (size=10311) 2024-12-02T14:12:50,811 INFO [M:0;a3a61c9ba14f:44757 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-02T14:12:50,811 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-02T14:12:50,811 INFO [M:0;a3a61c9ba14f:44757 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44757
2024-12-02T14:12:50,811 INFO [M:0;a3a61c9ba14f:44757 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-02T14:12:50,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T14:12:50,912 INFO [M:0;a3a61c9ba14f:44757 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-02T14:12:50,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44757-0x1009b46d9580000, quorum=127.0.0.1:59622, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T14:12:50,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@663b7fb1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-02T14:12:50,915 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@644054b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T14:12:50,915 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-02T14:12:50,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47b9368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T14:12:50,915 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e9b8f9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.log.dir/,STOPPED}
2024-12-02T14:12:50,916 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-02T14:12:50,916 WARN [BP-480779925-172.17.0.2-1733148768815 heartbeating to localhost/127.0.0.1:39117 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:12:50,916 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:12:50,916 WARN [BP-480779925-172.17.0.2-1733148768815 heartbeating to localhost/127.0.0.1:39117 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-480779925-172.17.0.2-1733148768815 (Datanode Uuid 739f7589-b5b1-4cfb-9028-2b490219cf81) service to localhost/127.0.0.1:39117 2024-12-02T14:12:50,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data3/current/BP-480779925-172.17.0.2-1733148768815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:50,917 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data4/current/BP-480779925-172.17.0.2-1733148768815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:50,918 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:12:50,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2482618b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T14:12:50,920 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56836339{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:50,921 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:50,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6162294b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:50,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40f3733a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:50,922 WARN [BP-480779925-172.17.0.2-1733148768815 heartbeating to localhost/127.0.0.1:39117 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T14:12:50,922 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T14:12:50,922 WARN [BP-480779925-172.17.0.2-1733148768815 heartbeating to localhost/127.0.0.1:39117 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-480779925-172.17.0.2-1733148768815 (Datanode Uuid 4561330a-38fa-4adb-8a7a-6207e7654803) service to localhost/127.0.0.1:39117 2024-12-02T14:12:50,922 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T14:12:50,923 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data1/current/BP-480779925-172.17.0.2-1733148768815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:50,923 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/cluster_f7da3f95-9367-0dc7-f5fa-723db7912688/data/data2/current/BP-480779925-172.17.0.2-1733148768815 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T14:12:50,923 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T14:12:50,928 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3885c0c5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T14:12:50,928 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2394ff19{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T14:12:50,929 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T14:12:50,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5031c9de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T14:12:50,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d118eec{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7e0269b-8677-faf6-3802-15bacf7fc2f1/hadoop.log.dir/,STOPPED} 2024-12-02T14:12:50,934 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T14:12:50,949 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-02T14:12:50,956 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 230) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39117 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:39117 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39117 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39117 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39117 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to 
localhost/127.0.0.1:39117 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39117 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/a3a61c9ba14f:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39117 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=129 (was 97) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6044 (was 6125)
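The ResourceChecker summary above compares resource counts captured before and after the test (e.g. Thread=270 (was 230), SystemLoadAverage=129 (was 97)) and flags suspected leaks. As a rough illustration of that before/after pattern only, not HBase's ResourceChecker implementation, the following minimal Java sketch uses hypothetical class and method names and only standard JMX beans:

import java.lang.management.ManagementFactory;

public class SimpleResourceCheck {

    // Values compared before and after a "test", mirroring two of the reported metrics.
    record Snapshot(int liveThreads, double systemLoadAverage) {}

    static Snapshot snapshot() {
        return new Snapshot(
                ManagementFactory.getThreadMXBean().getThreadCount(),
                ManagementFactory.getOperatingSystemMXBean().getSystemLoadAverage());
    }

    // Print an "X=now (was before)" style report and flag a possible thread leak.
    static void report(Snapshot before, Snapshot after) {
        System.out.printf("Thread=%d (was %d)%s%n",
                after.liveThreads(), before.liveThreads(),
                after.liveThreads() > before.liveThreads() ? " - Thread LEAK? -" : "");
        System.out.printf("SystemLoadAverage=%.0f (was %.0f)%n",
                after.systemLoadAverage(), before.systemLoadAverage());
    }

    public static void main(String[] args) {
        Snapshot before = snapshot();
        // Simulate a "test" that leaves a daemon thread behind, so the report flags it.
        Thread leaked = new Thread(() -> {
            try { Thread.sleep(60_000); } catch (InterruptedException ignored) { }
        }, "leaked-worker");
        leaked.setDaemon(true);
        leaked.start();
        report(before, snapshot());
    }
}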