2024-11-25 10:28:29,175 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-25 10:28:29,186 main DEBUG Took 0.009349 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-25 10:28:29,186 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-25 10:28:29,186 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-25 10:28:29,187 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-25 10:28:29,188 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,195 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-25 10:28:29,206 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,207 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,208 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,208 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,209 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,209 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,210 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,210 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,210 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,211 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,211 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,212 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,212 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,212 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-25 10:28:29,213 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,213 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,213 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,214 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,214 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,214 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,215 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,215 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,215 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,215 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-25 10:28:29,216 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,216 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-25 10:28:29,217 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-25 10:28:29,218 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-25 10:28:29,220 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-25 10:28:29,221 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-25 10:28:29,222 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-25 10:28:29,222 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-25 10:28:29,230 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-25 10:28:29,232 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-25 10:28:29,234 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-25 10:28:29,234 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-25 10:28:29,235 main DEBUG createAppenders(={Console}) 2024-11-25 10:28:29,235 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-25 10:28:29,236 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-25 10:28:29,236 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-25 10:28:29,237 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-25 10:28:29,237 main DEBUG OutputStream closed 2024-11-25 10:28:29,237 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-25 10:28:29,238 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-25 10:28:29,238 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-25 10:28:29,318 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-25 10:28:29,320 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-25 10:28:29,321 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-25 10:28:29,322 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-25 10:28:29,322 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-25 10:28:29,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-25 10:28:29,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-25 10:28:29,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-25 10:28:29,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-25 10:28:29,324 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-25 10:28:29,324 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-25 10:28:29,324 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-25 10:28:29,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-25 10:28:29,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-25 10:28:29,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-25 10:28:29,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-25 10:28:29,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-25 10:28:29,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-25 10:28:29,328 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-25 10:28:29,329 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-25 10:28:29,329 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-25 10:28:29,330 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-25T10:28:29,595 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af 2024-11-25 10:28:29,597 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-25 10:28:29,598 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
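
Editor's note (illustrative, not part of the log): the PatternLayout built above, "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", is what renders every test line that follows as "<ISO-8601 timestamp> LEVEL [thread {MDC}] pkg.Class(line): message" on the Console (SYSTEM_ERR) appender. A minimal sketch of a class logging through SLF4J under that configuration; the package, class name and message are invented for the demo, and it assumes the same log4j2.properties and the log4j-slf4j binding used by the HBase test modules are on the classpath.

package org.example.layout;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PatternLayoutDemo {
  // SLF4J call is routed to the Console appender configured in the status log above.
  private static final Logger LOG = LoggerFactory.getLogger(PatternLayoutDemo.class);

  public static void main(String[] args) {
    // Rendered roughly as:
    // 2024-11-25T10:28:29,607 INFO  [main {}] layout.PatternLayoutDemo(13): hello from the demo
    // (%C{2} keeps the last two class-name components, %L is the source line number)
    LOG.info("hello from the demo");
  }
}
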
2024-11-25T10:28:29,607 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-25T10:28:29,639 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=217, ProcessCount=11, AvailableMemoryMB=2098 2024-11-25T10:28:29,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T10:28:29,657 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea, deleteOnExit=true 2024-11-25T10:28:29,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T10:28:29,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/test.cache.data in system properties and HBase conf 2024-11-25T10:28:29,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T10:28:29,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir in system properties and HBase conf 2024-11-25T10:28:29,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T10:28:29,660 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T10:28:29,661 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T10:28:29,744 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-25T10:28:29,833 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T10:28:29,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:28:29,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:28:29,837 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T10:28:29,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:28:29,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T10:28:29,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T10:28:29,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:28:29,839 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:28:29,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T10:28:29,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/nfs.dump.dir in system properties and HBase conf 2024-11-25T10:28:29,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/java.io.tmpdir in system properties and HBase conf 2024-11-25T10:28:29,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:28:29,841 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T10:28:29,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T10:28:30,363 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:28:30,759 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-25T10:28:30,858 INFO [Time-limited test {}] log.Log(170): Logging initialized @2369ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-25T10:28:30,975 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:28:31,064 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:28:31,091 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:28:31,091 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:28:31,093 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:28:31,112 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:28:31,115 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:28:31,116 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:28:31,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/java.io.tmpdir/jetty-localhost-33827-hadoop-hdfs-3_4_1-tests_jar-_-any-16575441369932113184/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:28:31,378 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33827} 2024-11-25T10:28:31,378 INFO [Time-limited test {}] server.Server(415): Started @2890ms 2024-11-25T10:28:31,433 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:28:31,870 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:28:31,880 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:28:31,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:28:31,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:28:31,884 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:28:31,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:28:31,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:28:32,026 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/java.io.tmpdir/jetty-localhost-38481-hadoop-hdfs-3_4_1-tests_jar-_-any-7571992109972131367/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:28:32,027 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:38481} 2024-11-25T10:28:32,028 INFO [Time-limited test {}] server.Server(415): Started @3540ms 2024-11-25T10:28:32,089 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:28:32,230 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:28:32,238 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:28:32,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:28:32,245 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:28:32,245 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:28:32,249 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:28:32,250 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:28:32,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/java.io.tmpdir/jetty-localhost-34833-hadoop-hdfs-3_4_1-tests_jar-_-any-8930617400267660019/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:28:32,398 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:34833} 2024-11-25T10:28:32,398 INFO [Time-limited test {}] server.Server(415): Started @3910ms 2024-11-25T10:28:32,400 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
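
Editor's note (illustrative, not part of the log): the NameNode/DataNode web servers above, and the ZooKeeper, master and regionserver startup that follows, are all driven by the single startMiniCluster call whose options are logged at 10:28:29,641 (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1). A minimal sketch assuming the HBase 3.x test utility API shown in the log; the surrounding test skeleton is invented for illustration and is not the actual TestLogRolling code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // matches StartMiniClusterOption{numMasters=1, ...} in the log
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option); // brings up mini DFS, mini ZooKeeper, master and regionserver
    try {
      // a real test such as TestLogRolling would exercise WAL rolling here
    } finally {
      util.shutdownMiniCluster(); // tears the cluster down and removes the test data directories
    }
  }
}
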
2024-11-25T10:28:32,625 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data4/current/BP-190105748-172.17.0.2-1732530510466/current, will proceed with Du for space computation calculation, 2024-11-25T10:28:32,625 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data1/current/BP-190105748-172.17.0.2-1732530510466/current, will proceed with Du for space computation calculation, 2024-11-25T10:28:32,625 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data3/current/BP-190105748-172.17.0.2-1732530510466/current, will proceed with Du for space computation calculation, 2024-11-25T10:28:32,630 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data2/current/BP-190105748-172.17.0.2-1732530510466/current, will proceed with Du for space computation calculation, 2024-11-25T10:28:32,697 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:28:32,699 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:28:32,781 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c37a8eeeb2e3544 with lease ID 0x172b3bcb4eb4c6f8: Processing first storage report for DS-ab69f370-d9d6-413b-aa64-19c117466e48 from datanode DatanodeRegistration(127.0.0.1:38111, datanodeUuid=e2ef4006-82b2-4d47-8672-8d6f9a5ba728, infoPort=46043, infoSecurePort=0, ipcPort=45047, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466) 2024-11-25T10:28:32,782 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c37a8eeeb2e3544 with lease ID 0x172b3bcb4eb4c6f8: from storage DS-ab69f370-d9d6-413b-aa64-19c117466e48 node DatanodeRegistration(127.0.0.1:38111, datanodeUuid=e2ef4006-82b2-4d47-8672-8d6f9a5ba728, infoPort=46043, infoSecurePort=0, ipcPort=45047, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:28:32,783 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf757a992a5182566 with lease ID 0x172b3bcb4eb4c6f9: Processing first storage report for DS-320ed259-2d28-4e10-9092-72d332797e12 from datanode DatanodeRegistration(127.0.0.1:44819, datanodeUuid=6c256c1f-3b9a-445c-a0e1-04d30c215f5e, infoPort=38625, infoSecurePort=0, ipcPort=41187, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466) 2024-11-25T10:28:32,783 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf757a992a5182566 with lease ID 0x172b3bcb4eb4c6f9: from storage DS-320ed259-2d28-4e10-9092-72d332797e12 node DatanodeRegistration(127.0.0.1:44819, datanodeUuid=6c256c1f-3b9a-445c-a0e1-04d30c215f5e, infoPort=38625, infoSecurePort=0, ipcPort=41187, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:28:32,783 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5c37a8eeeb2e3544 with lease ID 0x172b3bcb4eb4c6f8: Processing first storage report for DS-670c8cd8-0de9-4c11-a1e4-a7fe102636cb from datanode DatanodeRegistration(127.0.0.1:38111, datanodeUuid=e2ef4006-82b2-4d47-8672-8d6f9a5ba728, infoPort=46043, infoSecurePort=0, ipcPort=45047, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466) 2024-11-25T10:28:32,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5c37a8eeeb2e3544 with lease ID 0x172b3bcb4eb4c6f8: from storage DS-670c8cd8-0de9-4c11-a1e4-a7fe102636cb node DatanodeRegistration(127.0.0.1:38111, datanodeUuid=e2ef4006-82b2-4d47-8672-8d6f9a5ba728, infoPort=46043, infoSecurePort=0, ipcPort=45047, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:28:32,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf757a992a5182566 with lease ID 0x172b3bcb4eb4c6f9: Processing first storage report for DS-62e3b062-4dc4-4343-aa87-898589f34423 from datanode DatanodeRegistration(127.0.0.1:44819, datanodeUuid=6c256c1f-3b9a-445c-a0e1-04d30c215f5e, infoPort=38625, infoSecurePort=0, ipcPort=41187, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466) 2024-11-25T10:28:32,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xf757a992a5182566 with lease ID 0x172b3bcb4eb4c6f9: from storage DS-62e3b062-4dc4-4343-aa87-898589f34423 node DatanodeRegistration(127.0.0.1:44819, datanodeUuid=6c256c1f-3b9a-445c-a0e1-04d30c215f5e, infoPort=38625, infoSecurePort=0, ipcPort=41187, storageInfo=lv=-57;cid=testClusterID;nsid=246341569;c=1732530510466), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:28:32,833 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af 2024-11-25T10:28:32,917 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/zookeeper_0, clientPort=62959, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T10:28:32,928 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62959 2024-11-25T10:28:32,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:32,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:33,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:28:33,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:28:33,650 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3 with version=8 2024-11-25T10:28:33,650 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase-staging 2024-11-25T10:28:33,765 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-25T10:28:34,058 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:28:34,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:28:34,074 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:28:34,081 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:28:34,081 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:28:34,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:28:34,251 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T10:28:34,335 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-25T10:28:34,351 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-25T10:28:34,357 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:28:34,400 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20331 (auto-detected) 2024-11-25T10:28:34,402 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-25T10:28:34,436 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42855 2024-11-25T10:28:34,461 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42855 connecting to ZooKeeper ensemble=127.0.0.1:62959 2024-11-25T10:28:34,502 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428550x0, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:28:34,506 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42855-0x1014eab8e8d0000 connected 2024-11-25T10:28:34,569 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:34,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:34,594 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:28:34,600 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3, hbase.cluster.distributed=false 2024-11-25T10:28:34,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:28:34,653 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42855 2024-11-25T10:28:34,656 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42855 2024-11-25T10:28:34,658 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42855 2024-11-25T10:28:34,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42855 2024-11-25T10:28:34,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42855 2024-11-25T10:28:34,850 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:28:34,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:28:34,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:28:34,854 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:28:34,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:28:34,855 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:28:34,858 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:28:34,861 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:28:34,862 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37987 2024-11-25T10:28:34,864 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37987 connecting to ZooKeeper ensemble=127.0.0.1:62959 2024-11-25T10:28:34,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:34,885 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:34,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379870x0, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:28:34,897 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379870x0, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:28:34,897 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37987-0x1014eab8e8d0001 connected 2024-11-25T10:28:34,903 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:28:34,911 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:28:34,914 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T10:28:34,919 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:28:34,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-25T10:28:34,920 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37987 2024-11-25T10:28:34,921 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37987 2024-11-25T10:28:34,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-25T10:28:34,922 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37987 2024-11-25T10:28:34,938 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2fd862922a0:42855 2024-11-25T10:28:34,940 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2fd862922a0,42855,1732530513841 2024-11-25T10:28:34,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:28:34,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:28:34,949 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2fd862922a0,42855,1732530513841 2024-11-25T10:28:34,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T10:28:34,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:34,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
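
Editor's note (illustrative, not part of the log): the ZKWatcher/ZKUtil lines above follow a common pattern: set a watch on a znode that may not exist yet (for example /hbase/master) and react to the NodeCreated/NodeDeleted/NodeChildrenChanged events that arrive later. A minimal sketch of that pattern using the plain Apache ZooKeeper client API rather than HBase's internal ZKWatcher/ZKUtil wrappers; the ensemble address matches the one in the log, everything else is invented.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());

    ZooKeeper zk = new ZooKeeper("127.0.0.1:62959", 30_000, watcher);
    // exists() registers the watch even when the znode is missing, which is what
    // "Set watcher on znode that does not yet exist, /hbase/master" refers to.
    zk.exists("/hbase/master", true);
    Thread.sleep(5_000); // wait briefly for any events in this demo, then clean up
    zk.close();
  }
}
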
2024-11-25T10:28:34,980 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:28:34,981 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2fd862922a0,42855,1732530513841 from backup master directory 2024-11-25T10:28:34,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2fd862922a0,42855,1732530513841 2024-11-25T10:28:34,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:28:34,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:28:34,986 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:28:34,986 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2fd862922a0,42855,1732530513841 2024-11-25T10:28:34,989 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-25T10:28:34,990 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-25T10:28:35,065 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase.id] with ID: dc7d295a-a776-48d4-9471-1e1029fe14d8 2024-11-25T10:28:35,065 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/.tmp/hbase.id 2024-11-25T10:28:35,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:28:35,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:28:35,094 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/.tmp/hbase.id]:[hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase.id] 2024-11-25T10:28:35,152 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:35,158 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T10:28:35,179 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-25T10:28:35,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:28:35,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:28:35,222 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:28:35,224 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T10:28:35,231 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:28:35,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:28:35,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:28:35,300 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store 2024-11-25T10:28:35,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:28:35,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:28:35,333 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-25T10:28:35,337 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:28:35,338 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:28:35,339 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:28:35,339 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:28:35,340 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:28:35,340 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:28:35,340 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
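
Editor's note (illustrative, not part of the log): the attributes printed for the 'info' family of master:store above (VERSIONS => '3', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', BLOCKSIZE => 8192) map onto the public descriptor builders as sketched below. The table name "example:table" is a stand-in: master:store itself is an internal region created by MasterRegion, not through this client-facing API.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
        .build();

    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example:table"))       // stand-in for master:store
        .setColumnFamily(info)
        .build();

    // toString() prints the attributes in a form similar to the descriptor dump in the log
    System.out.println(table);
  }
}
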
2024-11-25T10:28:35,341 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530515338Disabling compacts and flushes for region at 1732530515338Disabling writes for close at 1732530515340 (+2 ms)Writing region close event to WAL at 1732530515340Closed at 1732530515340 2024-11-25T10:28:35,344 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/.initializing 2024-11-25T10:28:35,344 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/WALs/e2fd862922a0,42855,1732530513841 2024-11-25T10:28:35,374 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C42855%2C1732530513841, suffix=, logDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/WALs/e2fd862922a0,42855,1732530513841, archiveDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/oldWALs, maxLogs=10 2024-11-25T10:28:35,386 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C42855%2C1732530513841.1732530515380 2024-11-25T10:28:35,415 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/WALs/e2fd862922a0,42855,1732530513841/e2fd862922a0%2C42855%2C1732530513841.1732530515380 2024-11-25T10:28:35,428 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:46043:46043)] 2024-11-25T10:28:35,433 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:28:35,433 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:28:35,437 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,438 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T10:28:35,509 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:35,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,516 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T10:28:35,516 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,517 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:28:35,517 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T10:28:35,520 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:28:35,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T10:28:35,525 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:28:35,526 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,531 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,532 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,539 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,540 DEBUG [master/e2fd862922a0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,544 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T10:28:35,551 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:28:35,555 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:28:35,557 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866917, jitterRate=0.10234206914901733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T10:28:35,563 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732530515450Initializing all the Stores at 1732530515452 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530515453 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530515453Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530515454 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530515454Cleaning up temporary data from old regions at 1732530515540 (+86 ms)Region opened successfully at 1732530515563 (+23 ms) 2024-11-25T10:28:35,564 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T10:28:35,602 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@236a30fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:28:35,634 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T10:28:35,645 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T10:28:35,646 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T10:28:35,648 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T10:28:35,650 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-25T10:28:35,655 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-25T10:28:35,655 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T10:28:35,680 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T10:28:35,688 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T10:28:35,691 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T10:28:35,693 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T10:28:35,694 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T10:28:35,696 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T10:28:35,698 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T10:28:35,701 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T10:28:35,702 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T10:28:35,703 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T10:28:35,706 
DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T10:28:35,722 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T10:28:35,723 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T10:28:35,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:28:35,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:28:35,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,730 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2fd862922a0,42855,1732530513841, sessionid=0x1014eab8e8d0000, setting cluster-up flag (Was=false) 2024-11-25T10:28:35,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,749 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T10:28:35,750 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,42855,1732530513841 2024-11-25T10:28:35,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:35,767 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T10:28:35,769 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,42855,1732530513841 2024-11-25T10:28:35,775 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T10:28:35,827 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(746): ClusterId : dc7d295a-a776-48d4-9471-1e1029fe14d8 2024-11-25T10:28:35,830 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:28:35,834 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:28:35,835 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:28:35,838 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:28:35,838 DEBUG [RS:0;e2fd862922a0:37987 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dc1387d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:28:35,848 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T10:28:35,852 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2fd862922a0:37987 2024-11-25T10:28:35,855 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:28:35,855 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:28:35,855 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T10:28:35,857 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,42855,1732530513841 with port=37987, startcode=1732530514773 2024-11-25T10:28:35,858 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T10:28:35,864 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
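The balancer settings echoed above (slop=0.2, maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) are read from Configuration when the master starts. A hedged sketch of the keys involved follows; the key names are assumptions inferred from the classes named in the log and should be checked against the HBase version actually deployed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys the StochasticLoadBalancer reads; values below match the ones printed in the log.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1000000);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    // BaseLoadBalancer "slop": allowed deviation from the average region count per server.
    conf.setFloat("hbase.regions.slop", 0.2f);
    System.out.println("maxSteps=" + conf.getInt("hbase.master.balancer.stochastic.maxSteps", -1));
  }
}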
2024-11-25T10:28:35,868 DEBUG [RS:0;e2fd862922a0:37987 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:28:35,870 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2fd862922a0,42855,1732530513841 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T10:28:35,878 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:28:35,878 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:28:35,878 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:28:35,879 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:28:35,879 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2fd862922a0:0, corePoolSize=10, maxPoolSize=10 2024-11-25T10:28:35,879 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:35,879 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:28:35,880 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:35,882 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732530545882 2024-11-25T10:28:35,884 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T10:28:35,885 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:28:35,885 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T10:28:35,885 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T10:28:35,889 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T10:28:35,889 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T10:28:35,890 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T10:28:35,890 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T10:28:35,892 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,891 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:35,893 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T10:28:35,894 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T10:28:35,895 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T10:28:35,896 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T10:28:35,898 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T10:28:35,898 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T10:28:35,901 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530515900,5,FailOnTimeoutGroup] 
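The cleaner chores being initialized here are assembled from comma-separated plugin lists in Configuration. The sketch below is minimal and the property names are assumptions taken from the cleaner classes the log lists; verify them against the running version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerChoreConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Delegate lists the LogCleaner / HFileCleaner chores load at master startup.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
      + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
      + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
    // How long an archived WAL must sit in oldWALs before TimeToLiveLogCleaner may delete it (ms).
    conf.setLong("hbase.master.logcleaner.ttl", 600000L);
  }
}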
2024-11-25T10:28:35,901 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530515901,5,FailOnTimeoutGroup] 2024-11-25T10:28:35,902 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:35,902 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T10:28:35,903 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:35,903 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:35,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:28:35,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:28:35,911 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T10:28:35,911 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3 2024-11-25T10:28:35,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38111 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:28:35,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:28:35,924 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:28:35,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:28:35,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:28:35,929 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:35,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:28:35,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:28:35,932 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:35,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:28:35,935 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:28:35,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:35,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:28:35,939 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:28:35,939 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:35,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:35,940 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:28:35,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740 2024-11-25T10:28:35,943 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740 2024-11-25T10:28:35,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:28:35,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:28:35,947 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:28:35,949 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34147, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:28:35,951 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:28:35,955 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:28:35,957 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42855 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,37987,1732530514773 2024-11-25T10:28:35,960 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42855 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,37987,1732530514773 2024-11-25T10:28:35,961 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=771229, jitterRate=-0.019332274794578552}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:28:35,963 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732530515924Initializing all the Stores at 1732530515926 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530515926Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530515926Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530515926Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530515926Cleaning up temporary data from old regions at 1732530515946 (+20 ms)Region opened successfully at 1732530515963 (+17 ms) 2024-11-25T10:28:35,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:28:35,964 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:28:35,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:28:35,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:28:35,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:28:35,965 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:28:35,965 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530515964Disabling compacts and flushes for region at 1732530515964Disabling writes for close at 1732530515964Writing region close event to WAL at 1732530515965 (+1 ms)Closed at 1732530515965 2024-11-25T10:28:35,969 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:28:35,969 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T10:28:35,975 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T10:28:35,977 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3 2024-11-25T10:28:35,977 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39029 2024-11-25T10:28:35,977 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:28:35,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:28:35,982 DEBUG [RS:0;e2fd862922a0:37987 {}] zookeeper.ZKUtil(111): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,37987,1732530514773 2024-11-25T10:28:35,982 WARN [RS:0;e2fd862922a0:37987 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
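The ZKWatcher lines that recur throughout this log (NodeChildrenChanged on /hbase, the watch set on /hbase/rs/e2fd862922a0,37987,...) are ordinary ZooKeeper watches. Below is a minimal sketch using the stock ZooKeeper client, assuming the quorum 127.0.0.1:62959 printed above, which in this test run is an ephemeral localhost port.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class WatchRegionServers {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log; replace with the real quorum outside this test.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62959", 30000, (WatchedEvent e) ->
        System.out.println("event type=" + e.getType() + " path=" + e.getPath()));
    // 'true' registers the default watcher; ZooKeeper watches are one-shot, so the
    // callback must re-register if it wants further NodeChildrenChanged events.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("live regionservers: " + servers);
    zk.close();
  }
}

The one-shot nature of these watches is why the master and regionserver watchers above keep re-receiving and re-arming NodeChildrenChanged notifications as znodes under /hbase are created.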
2024-11-25T10:28:35,982 INFO [RS:0;e2fd862922a0:37987 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:28:35,982 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773 2024-11-25T10:28:35,984 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,37987,1732530514773] 2024-11-25T10:28:35,984 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:28:35,987 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T10:28:36,013 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:28:36,025 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:28:36,030 INFO [RS:0;e2fd862922a0:37987 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:28:36,030 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,031 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:28:36,037 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:28:36,038 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-25T10:28:36,038 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,038 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,038 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,039 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,039 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,039 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:28:36,039 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,039 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,040 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,040 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,040 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,040 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:28:36,040 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:28:36,040 DEBUG [RS:0;e2fd862922a0:37987 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:28:36,042 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,043 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,043 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,043 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-25T10:28:36,043 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,044 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37987,1732530514773-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:28:36,062 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:28:36,064 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37987,1732530514773-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,064 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,064 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.Replication(171): e2fd862922a0,37987,1732530514773 started 2024-11-25T10:28:36,082 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,082 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,37987,1732530514773, RpcServer on e2fd862922a0/172.17.0.2:37987, sessionid=0x1014eab8e8d0001 2024-11-25T10:28:36,083 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:28:36,083 DEBUG [RS:0;e2fd862922a0:37987 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,37987,1732530514773 2024-11-25T10:28:36,083 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,37987,1732530514773' 2024-11-25T10:28:36,083 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:28:36,084 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:28:36,085 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:28:36,085 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:28:36,085 DEBUG [RS:0;e2fd862922a0:37987 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2fd862922a0,37987,1732530514773 2024-11-25T10:28:36,085 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,37987,1732530514773' 2024-11-25T10:28:36,085 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:28:36,086 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:28:36,086 DEBUG [RS:0;e2fd862922a0:37987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:28:36,086 INFO [RS:0;e2fd862922a0:37987 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:28:36,086 INFO [RS:0;e2fd862922a0:37987 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-25T10:28:36,138 WARN [e2fd862922a0:42855 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T10:28:36,195 INFO [RS:0;e2fd862922a0:37987 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C37987%2C1732530514773, suffix=, logDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773, archiveDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs, maxLogs=32 2024-11-25T10:28:36,197 INFO [RS:0;e2fd862922a0:37987 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530516197 2024-11-25T10:28:36,206 INFO [RS:0;e2fd862922a0:37987 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530516197 2024-11-25T10:28:36,207 DEBUG [RS:0;e2fd862922a0:37987 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46043:46043),(127.0.0.1/127.0.0.1:38625:38625)] 2024-11-25T10:28:36,391 DEBUG [e2fd862922a0:42855 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T10:28:36,405 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2fd862922a0,37987,1732530514773 2024-11-25T10:28:36,414 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,37987,1732530514773, state=OPENING 2024-11-25T10:28:36,420 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T10:28:36,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:36,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:28:36,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:28:36,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:28:36,426 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:28:36,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,37987,1732530514773}] 2024-11-25T10:28:36,613 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T10:28:36,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51079, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T10:28:36,630 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T10:28:36,631 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:28:36,634 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C37987%2C1732530514773.meta, suffix=.meta, logDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773, archiveDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs, maxLogs=32 2024-11-25T10:28:36,637 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.meta.1732530516636.meta 2024-11-25T10:28:36,646 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.meta.1732530516636.meta 2024-11-25T10:28:36,651 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:46043:46043)] 2024-11-25T10:28:36,652 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:28:36,654 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T10:28:36,657 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T10:28:36,663 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
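
The WAL configuration echoed above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) is what drives the rolling behaviour this test exercises: the roll size is the block size multiplied by the log-roll multiplier. A minimal sketch of how those knobs are typically set on the test Configuration follows; the key names are recalled from memory rather than taken from this log, so treat them as assumptions to verify against the HBase release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names for the WAL-rolling knobs seen in the log entry above.
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * multiplier
            conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
            long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
                    * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
            System.out.println("expected roll size = " + rollSize + " bytes"); // 134217728 = 128 MB
        }
    }
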
2024-11-25T10:28:36,667 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T10:28:36,668 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:28:36,669 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T10:28:36,669 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T10:28:36,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:28:36,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:28:36,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:36,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:36,675 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:28:36,677 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:28:36,677 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:36,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:36,678 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:28:36,680 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:28:36,680 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:36,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:28:36,681 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:28:36,682 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:28:36,682 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:36,683 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
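
The CompactionConfiguration(183) entries above repeat the same defaults for every column family of hbase:meta (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2). For reference, a hedged sketch of the cluster-wide keys those values usually come from; the key names are assumptions from memory, not read from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys behind the values echoed by CompactionConfiguration(183) above.
            conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio
            System.out.println("compaction ratio = "
                    + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
        }
    }
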
2024-11-25T10:28:36,683 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:28:36,685 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740 2024-11-25T10:28:36,688 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740 2024-11-25T10:28:36,691 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:28:36,691 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:28:36,692 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:28:36,695 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:28:36,698 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861951, jitterRate=0.09602753818035126}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:28:36,698 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T10:28:36,700 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732530516669Writing region info on filesystem at 1732530516669Initializing all the Stores at 1732530516672 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530516672Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530516672Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530516672Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530516672Cleaning up temporary data from old regions at 1732530516691 (+19 ms)Running coprocessor post-open hooks at 1732530516698 (+7 ms)Region opened successfully at 1732530516700 (+2 ms) 2024-11-25T10:28:36,709 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732530516600 2024-11-25T10:28:36,723 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T10:28:36,723 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T10:28:36,725 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,37987,1732530514773 2024-11-25T10:28:36,728 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,37987,1732530514773, state=OPEN 2024-11-25T10:28:36,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:28:36,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:28:36,739 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:28:36,739 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:28:36,739 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2fd862922a0,37987,1732530514773 2024-11-25T10:28:36,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T10:28:36,746 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,37987,1732530514773 in 311 msec 2024-11-25T10:28:36,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T10:28:36,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 772 msec 2024-11-25T10:28:36,755 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:28:36,755 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T10:28:36,782 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:28:36,784 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,37987,1732530514773, seqNum=-1] 2024-11-25T10:28:36,813 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:28:36,815 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33429, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:28:36,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0330 sec 2024-11-25T10:28:36,839 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732530516838, completionTime=-1 2024-11-25T10:28:36,843 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T10:28:36,843 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T10:28:36,878 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T10:28:36,878 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732530576878 2024-11-25T10:28:36,878 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732530636878 2024-11-25T10:28:36,879 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 35 msec 2024-11-25T10:28:36,882 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42855,1732530513841-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,883 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42855,1732530513841-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,883 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42855,1732530513841-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,885 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2fd862922a0:42855, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:28:36,885 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,886 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T10:28:36,892 DEBUG [master/e2fd862922a0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T10:28:36,918 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.932sec 2024-11-25T10:28:36,920 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T10:28:36,921 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T10:28:36,922 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T10:28:36,923 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T10:28:36,923 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T10:28:36,924 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42855,1732530513841-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:28:36,924 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42855,1732530513841-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T10:28:36,934 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T10:28:36,935 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T10:28:36,936 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42855,1732530513841-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
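
At this point the master reports initialization complete and one region server has checked in. A client can confirm the same state programmatically; below is a minimal sketch using Admin.getClusterMetrics(), assuming a Configuration that carries this minicluster's ZooKeeper quorum (names such as conf are placeholders, not taken from the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // assumed to point at the running minicluster
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                System.out.println("active master       = " + metrics.getMasterName());
                System.out.println("live region servers = " + metrics.getLiveServerMetrics().size());
            }
        }
    }
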
2024-11-25T10:28:36,938 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e87ad9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:28:36,940 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-25T10:28:36,941 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-25T10:28:36,945 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2fd862922a0,42855,-1 for getting cluster id 2024-11-25T10:28:36,949 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T10:28:36,957 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dc7d295a-a776-48d4-9471-1e1029fe14d8' 2024-11-25T10:28:36,960 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T10:28:36,960 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dc7d295a-a776-48d4-9471-1e1029fe14d8" 2024-11-25T10:28:36,963 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34d065df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:28:36,963 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2fd862922a0,42855,-1] 2024-11-25T10:28:36,966 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T10:28:36,968 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:28:36,970 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37128, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T10:28:36,974 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd7b3c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:28:36,974 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:28:36,982 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,37987,1732530514773, seqNum=-1] 2024-11-25T10:28:36,983 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:28:36,985 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42356, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:28:37,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=e2fd862922a0,42855,1732530513841 2024-11-25T10:28:37,012 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:28:37,023 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T10:28:37,028 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T10:28:37,034 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e2fd862922a0,42855,1732530513841 2024-11-25T10:28:37,037 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@69d3c562 2024-11-25T10:28:37,038 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T10:28:37,041 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37130, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T10:28:37,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T10:28:37,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
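
The two TableDescriptorChecker warnings are expected for a log-rolling test: a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) force frequent flushes and rolls. A minimal sketch of putting those values on a table descriptor is below; the numbers mirror the warnings, everything else (no column families shown here) is illustrative.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallRegionSizesSketch {
        public static void main(String[] args) {
            // Deliberately tiny sizes, matching the values the checker warns about above.
            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                    .setMaxFileSize(786432L)     // MAX_FILESIZE: triggers the "over splitting" warning
                    .setMemStoreFlushSize(8192L) // MEMSTORE_FLUSHSIZE: triggers the "frequent flushing" warning
                    .build();
            System.out.println(td);
        }
    }
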
2024-11-25T10:28:37,047 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:28:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-25T10:28:37,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T10:28:37,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-25T10:28:37,062 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:37,064 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T10:28:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:28:37,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741835_1011 (size=389) 2024-11-25T10:28:37,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741835_1011 (size=389) 2024-11-25T10:28:37,114 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1e03c635290cb3529695c45e3af34eeb, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3 2024-11-25T10:28:37,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741836_1012 (size=72) 2024-11-25T10:28:37,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741836_1012 (size=72) 2024-11-25T10:28:37,132 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:28:37,133 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 1e03c635290cb3529695c45e3af34eeb, disabling compactions & flushes 2024-11-25T10:28:37,133 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:28:37,133 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:28:37,133 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. after waiting 0 ms 2024-11-25T10:28:37,133 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:28:37,134 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:28:37,134 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1e03c635290cb3529695c45e3af34eeb: Waiting for close lock at 1732530517133Disabling compacts and flushes for region at 1732530517133Disabling writes for close at 1732530517133Writing region close event to WAL at 1732530517133Closed at 1732530517134 (+1 ms) 2024-11-25T10:28:37,137 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T10:28:37,143 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732530517137"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732530517137"}]},"ts":"1732530517137"} 2024-11-25T10:28:37,150 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
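
The HMaster$4(2454) entry above shows the schema the client asked for: a single 'info' family with VERSIONS => '1' and BLOOMFILTER => 'ROW'. A minimal client-side sketch of issuing that create with the builder API follows; connection details are assumptions, and the small file/flush sizes from the previous sketch could be chained onto the same builder.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                            .setMaxVersions(1)                 // VERSIONS => '1'
                            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                            .build())
                    .build();
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(td); // drives a CreateTableProcedure like pid=4 above
            }
        }
    }
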
2024-11-25T10:28:37,152 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T10:28:37,156 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530517152"}]},"ts":"1732530517152"} 2024-11-25T10:28:37,162 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-25T10:28:37,164 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1e03c635290cb3529695c45e3af34eeb, ASSIGN}] 2024-11-25T10:28:37,167 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1e03c635290cb3529695c45e3af34eeb, ASSIGN 2024-11-25T10:28:37,169 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1e03c635290cb3529695c45e3af34eeb, ASSIGN; state=OFFLINE, location=e2fd862922a0,37987,1732530514773; forceNewPlan=false, retain=false 2024-11-25T10:28:37,321 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1e03c635290cb3529695c45e3af34eeb, regionState=OPENING, regionLocation=e2fd862922a0,37987,1732530514773 2024-11-25T10:28:37,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1e03c635290cb3529695c45e3af34eeb, ASSIGN because future has completed 2024-11-25T10:28:37,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e03c635290cb3529695c45e3af34eeb, server=e2fd862922a0,37987,1732530514773}] 2024-11-25T10:28:37,488 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 
2024-11-25T10:28:37,489 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e03c635290cb3529695c45e3af34eeb, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:28:37,489 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,489 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:28:37,489 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,490 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,492 INFO [StoreOpener-1e03c635290cb3529695c45e3af34eeb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,495 INFO [StoreOpener-1e03c635290cb3529695c45e3af34eeb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e03c635290cb3529695c45e3af34eeb columnFamilyName info 2024-11-25T10:28:37,495 DEBUG [StoreOpener-1e03c635290cb3529695c45e3af34eeb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:28:37,496 INFO [StoreOpener-1e03c635290cb3529695c45e3af34eeb-1 {}] regionserver.HStore(327): Store=1e03c635290cb3529695c45e3af34eeb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:28:37,496 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,498 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,498 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,499 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,499 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,502 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,506 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:28:37,507 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1e03c635290cb3529695c45e3af34eeb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836542, jitterRate=0.06371933221817017}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T10:28:37,507 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:37,509 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1e03c635290cb3529695c45e3af34eeb: Running coprocessor pre-open hook at 1732530517490Writing region info on filesystem at 1732530517490Initializing all the Stores at 1732530517491 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530517492 (+1 ms)Cleaning up temporary data from old regions at 1732530517499 (+7 ms)Running coprocessor post-open hooks at 1732530517507 (+8 ms)Region opened successfully at 1732530517509 (+2 ms) 2024-11-25T10:28:37,512 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb., pid=6, masterSystemTime=1732530517481 2024-11-25T10:28:37,516 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:28:37,516 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:28:37,517 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1e03c635290cb3529695c45e3af34eeb, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,37987,1732530514773 2024-11-25T10:28:37,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e03c635290cb3529695c45e3af34eeb, server=e2fd862922a0,37987,1732530514773 because future has completed 2024-11-25T10:28:37,552 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T10:28:37,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1e03c635290cb3529695c45e3af34eeb, server=e2fd862922a0,37987,1732530514773 in 198 msec 2024-11-25T10:28:37,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T10:28:37,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=1e03c635290cb3529695c45e3af34eeb, ASSIGN in 389 msec 2024-11-25T10:28:37,560 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T10:28:37,560 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530517560"}]},"ts":"1732530517560"} 2024-11-25T10:28:37,564 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-25T10:28:37,566 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T10:28:37,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 516 msec 2024-11-25T10:28:42,171 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-25T10:28:42,213 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T10:28:42,215 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-25T10:28:44,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T10:28:44,328 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T10:28:44,329 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-25T10:28:44,329 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-25T10:28:44,330 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:28:44,330 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T10:28:44,331 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T10:28:44,331 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-25T10:28:47,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:28:47,113 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-25T10:28:47,117 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-25T10:28:47,126 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-25T10:28:47,126 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 
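
From here the test writes to the new table and lets the tiny memstore flush size take over: the entries that follow show seven ~1 KB cells (7.36 KB total, keyed row0001 onwards) being flushed at sequenceid=11, along with WAL rolls and slow-sync warnings. A hedged sketch of the client side of that write path is below; the qualifier name and row formatting are illustrative, not taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRowsSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
                byte[] family = Bytes.toBytes("info");
                byte[] value = new byte[1024]; // ~1 KB per cell, comparable to the 1080-byte cells flushed below
                for (int i = 1; i <= 7; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i))); // row0001 ... row0007
                    put.addColumn(family, Bytes.toBytes("q"), value); // qualifier "q" is hypothetical
                    table.put(put);
                }
            }
        }
    }
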
2024-11-25T10:28:47,127 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530527127 2024-11-25T10:28:47,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:28:47,141 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:28:47,141 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:28:47,141 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:28:47,142 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:28:47,142 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530516197 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530527127 2024-11-25T10:28:47,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741833_1009 (size=451) 2024-11-25T10:28:47,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741833_1009 (size=451) 2024-11-25T10:28:47,152 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:46043:46043)] 2024-11-25T10:28:47,153 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530516197 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs/e2fd862922a0%2C37987%2C1732530514773.1732530516197 2024-11-25T10:28:47,166 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb., hostname=e2fd862922a0,37987,1732530514773, seqNum=2] 2024-11-25T10:28:59,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] regionserver.HRegion(8855): Flush requested on 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:28:59,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e03c635290cb3529695c45e3af34eeb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:28:59,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/d95f9bc33a874272929a659acd09837c is 1080, key is row0001/info:/1732530527169/Put/seqid=0 2024-11-25T10:28:59,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741838_1014 (size=12509) 2024-11-25T10:28:59,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741838_1014 (size=12509) 2024-11-25T10:28:59,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/d95f9bc33a874272929a659acd09837c 2024-11-25T10:28:59,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/d95f9bc33a874272929a659acd09837c as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c 2024-11-25T10:28:59,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c, entries=7, sequenceid=11, filesize=12.2 K 2024-11-25T10:28:59,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1e03c635290cb3529695c45e3af34eeb in 140ms, sequenceid=11, compaction requested=false 2024-11-25T10:28:59,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e03c635290cb3529695c45e3af34eeb: 2024-11-25T10:29:02,830 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T10:29:07,213 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530547212 2024-11-25T10:29:07,421 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:07,422 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:07,422 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:07,422 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:07,422 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:07,422 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:07,423 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530527127 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530547212 2024-11-25T10:29:07,424 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:46043:46043)] 2024-11-25T10:29:07,424 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530527127 is not closed yet, will try archiving it next time 2024-11-25T10:29:07,425 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741837_1013 (size=12399) 2024-11-25T10:29:07,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741837_1013 (size=12399) 2024-11-25T10:29:07,627 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:09,831 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:12,036 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:14,240 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:14,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] regionserver.HRegion(8855): Flush requested on 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:29:14,241 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e03c635290cb3529695c45e3af34eeb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:29:14,443 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:14,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/02ef13054db74e4d867dededfc413663 is 1080, key is row0008/info:/1732530541201/Put/seqid=0 2024-11-25T10:29:14,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741840_1016 (size=12509) 2024-11-25T10:29:14,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741840_1016 (size=12509) 2024-11-25T10:29:14,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/02ef13054db74e4d867dededfc413663 2024-11-25T10:29:14,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/02ef13054db74e4d867dededfc413663 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/02ef13054db74e4d867dededfc413663 2024-11-25T10:29:14,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/02ef13054db74e4d867dededfc413663, entries=7, sequenceid=21, filesize=12.2 K 2024-11-25T10:29:14,691 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:14,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1e03c635290cb3529695c45e3af34eeb in 451ms, sequenceid=21, compaction requested=false 2024-11-25T10:29:14,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e03c635290cb3529695c45e3af34eeb: 2024-11-25T10:29:14,692 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-25T10:29:14,692 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:29:14,693 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c because midkey is the same as first or last row 2024-11-25T10:29:16,445 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:16,938 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T10:29:16,938 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
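The flush above is immediately followed by the split-policy checks that recur throughout this run: the size test passes (sumSize=24.4 K against sizeToCheck=16.0 K), but the split is refused because the midkey equals the first or last row. Below is a minimal, purely illustrative Java sketch of that two-stage decision; the class and method names are invented here, and the real logic lives in ConstantSizeRegionSplitPolicy, IncreasingToUpperBoundRegionSplitPolicy and StoreUtils with considerably more state involved.

import java.util.Arrays;

public final class SplitCheckSketch {
    // Simplified stand-in for the two checks the RegionServer logs above:
    // 1) is the store big enough, 2) does the midkey give a usable split point.
    static boolean shouldSplit(long sumSizeBytes, long sizeToCheckBytes,
                               byte[] midKey, byte[] firstRowKey, byte[] lastRowKey) {
        if (sumSizeBytes <= sizeToCheckBytes) {
            return false; // fails the "region size is big enough" test
        }
        // Mirrors "cannot split ... because midkey is the same as first or last row"
        return !(Arrays.equals(midKey, firstRowKey) || Arrays.equals(midKey, lastRowKey));
    }

    public static void main(String[] args) {
        long sumSize = 25018L;     // ~24.4 K, the store total reported for 1e03c635290cb3529695c45e3af34eeb
        long sizeToCheck = 16384L; // the 16.0 K threshold from the log
        byte[] row0001 = "row0001".getBytes(); // illustrative keys only
        // With the midkey collapsing onto the first row, the split is declined despite the size:
        System.out.println(shouldSplit(sumSize, sizeToCheck, row0001, row0001, "row0008".getBytes()));
    }
}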
2024-11-25T10:29:18,650 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:18,652 WARN [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:18,653 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C37987%2C1732530514773:(num 1732530547212) roll requested 2024-11-25T10:29:18,653 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530558653 2024-11-25T10:29:18,862 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:18,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:18,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:18,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:18,863 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:18,863 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:18,863 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530547212 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530558653 2024-11-25T10:29:18,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741839_1015 (size=7739) 2024-11-25T10:29:18,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741839_1015 (size=7739) 2024-11-25T10:29:18,866 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46043:46043),(127.0.0.1/127.0.0.1:38625:38625)] 2024-11-25T10:29:18,867 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530547212 is not closed yet, will try archiving it next time 2024-11-25T10:29:18,867 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530527127 to 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs/e2fd862922a0%2C37987%2C1732530514773.1732530527127 2024-11-25T10:29:20,854 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:22,489 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1e03c635290cb3529695c45e3af34eeb, had cached 0 bytes from a total of 25018 2024-11-25T10:29:23,060 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:25,265 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:27,469 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:29,471 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T10:29:29,472 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530569472 2024-11-25T10:29:32,831 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
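The "Slow sync cost: N ms" entries above are the raw signal this test is built around: roughly 200 ms per sync here, rising to 5000+ ms bursts further down. The following small Java sketch pulls those costs out of a saved copy of this output; the file name test-run.log is hypothetical, and the regex simply targets the wal.AbstractFSWAL message text shown above.

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class SlowSyncScan {
    // Matches the "Slow sync cost: NNN ms" fragments logged by wal.AbstractFSWAL above.
    private static final Pattern SLOW_SYNC = Pattern.compile("Slow sync cost: (\\d+) ms");

    public static void main(String[] args) throws Exception {
        String path = args.length > 0 ? args[0] : "test-run.log"; // hypothetical capture of this output
        List<String> lines = Files.readAllLines(Paths.get(path));
        int count = 0;
        long worst = 0;
        for (String line : lines) {
            Matcher m = SLOW_SYNC.matcher(line);
            while (m.find()) { // a line may carry several entries; count each match
                long ms = Long.parseLong(m.group(1));
                count++;
                worst = Math.max(worst, ms);
            }
        }
        System.out.printf("slow syncs: %d, worst: %d ms%n", count, worst);
    }
}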
2024-11-25T10:29:34,497 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:34,499 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:34,499 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C37987%2C1732530514773:(num 1732530569472) roll requested 2024-11-25T10:29:34,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:34,499 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:34,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:34,500 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:34,500 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:34,500 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530558653 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530569472 2024-11-25T10:29:34,501 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46043:46043),(127.0.0.1/127.0.0.1:38625:38625)] 2024-11-25T10:29:34,501 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530558653 is not closed yet, will try archiving it next time 2024-11-25T10:29:34,501 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530574501 2024-11-25T10:29:34,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741841_1017 (size=4753) 2024-11-25T10:29:34,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741841_1017 (size=4753) 2024-11-25T10:29:39,505 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:39,505 WARN [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], 
DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:39,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] regionserver.HRegion(8855): Flush requested on 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:29:39,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e03c635290cb3529695c45e3af34eeb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:29:39,518 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:39,518 WARN [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:41,506 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T10:29:44,509 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:44,509 WARN [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK], DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK]] 2024-11-25T10:29:44,509 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:44,509 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:44,509 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:44,509 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:44,509 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:44,510 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530569472 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 2024-11-25T10:29:44,511 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:46043:46043)] 2024-11-25T10:29:44,511 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530569472 is not closed yet, will try archiving it next time 2024-11-25T10:29:44,511 
DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C37987%2C1732530514773:(num 1732530574501) roll requested 2024-11-25T10:29:44,511 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530584511 2024-11-25T10:29:44,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741842_1018 (size=1569) 2024-11-25T10:29:44,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741842_1018 (size=1569) 2024-11-25T10:29:44,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/427dd55d7abb47e7a61d174c4abe8517 is 1080, key is row0015/info:/1732530556243/Put/seqid=0 2024-11-25T10:29:44,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741844_1020 (size=12509) 2024-11-25T10:29:44,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741844_1020 (size=12509) 2024-11-25T10:29:44,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/427dd55d7abb47e7a61d174c4abe8517 2024-11-25T10:29:44,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/427dd55d7abb47e7a61d174c4abe8517 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/427dd55d7abb47e7a61d174c4abe8517 2024-11-25T10:29:44,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/427dd55d7abb47e7a61d174c4abe8517, entries=7, sequenceid=31, filesize=12.2 K 2024-11-25T10:29:49,518 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:49,518 WARN [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:49,550 INFO [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:49,550 WARN [FSHLog-0-hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3-prefix:e2fd862922a0,37987,1732530514773 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44819,DS-320ed259-2d28-4e10-9092-72d332797e12,DISK], DatanodeInfoWithStorage[127.0.0.1:38111,DS-ab69f370-d9d6-413b-aa64-19c117466e48,DISK]] 2024-11-25T10:29:49,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1e03c635290cb3529695c45e3af34eeb in 10044ms, sequenceid=31, compaction requested=true 2024-11-25T10:29:49,550 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e03c635290cb3529695c45e3af34eeb: 2024-11-25T10:29:49,550 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,550 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,550 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-25T10:29:49,550 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,550 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:29:49,550 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,550 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c because midkey is the same as first or last row 2024-11-25T10:29:49,550 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530584511 2024-11-25T10:29:49,551 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:46043:46043)] 2024-11-25T10:29:49,551 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 is not closed yet, will try archiving it next time 2024-11-25T10:29:49,551 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530547212 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs/e2fd862922a0%2C37987%2C1732530514773.1732530547212 
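Both roll triggers have now appeared in this run: a count-based request (count=8, threshold=5, logged from AbstractFSWAL(2201)) and time-based requests for individual syncs over 5000 ms (AbstractFSWAL(1374)). A hedged sketch that tallies the two reasons from a captured copy of this log, using the same hypothetical file name as the snippet above:

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class RollReasonTally {
    // Distinguishes "exceeded slow sync threshold; count=..." from "...; time=..." requests.
    private static final Pattern ROLL =
            Pattern.compile("exceeded slow sync threshold; (count|time)=");

    public static void main(String[] args) throws Exception {
        List<String> lines = Files.readAllLines(Paths.get(args.length > 0 ? args[0] : "test-run.log"));
        long countBased = 0;
        long timeBased = 0;
        for (String line : lines) {
            Matcher m = ROLL.matcher(line);
            while (m.find()) {
                if ("count".equals(m.group(1))) {
                    countBased++;
                } else {
                    timeBased++;
                }
            }
        }
        System.out.println("count-based roll requests: " + countBased
                + ", time-based roll requests: " + timeBased);
    }
}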
2024-11-25T10:29:49,552 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C37987%2C1732530514773:(num 1732530584511) roll requested 2024-11-25T10:29:49,552 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37987%2C1732530514773.1732530589552 2024-11-25T10:29:49,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1e03c635290cb3529695c45e3af34eeb:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:29:49,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741843_1019 (size=438) 2024-11-25T10:29:49,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741843_1019 (size=438) 2024-11-25T10:29:49,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:29:49,555 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530558653 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs/e2fd862922a0%2C37987%2C1732530514773.1732530558653 2024-11-25T10:29:49,556 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:29:49,557 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530569472 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs/e2fd862922a0%2C37987%2C1732530514773.1732530569472 2024-11-25T10:29:49,559 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:29:49,560 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,560 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,560 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,560 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,560 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.HStore(1541): 1e03c635290cb3529695c45e3af34eeb/info is initiating minor compaction (all files) 2024-11-25T10:29:49,560 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530584511 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530589552 2024-11-25T10:29:49,561 INFO [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
1e03c635290cb3529695c45e3af34eeb/info in TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:29:49,561 INFO [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c, hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/02ef13054db74e4d867dededfc413663, hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/427dd55d7abb47e7a61d174c4abe8517] into tmpdir=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp, totalSize=36.6 K 2024-11-25T10:29:49,562 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] compactions.Compactor(225): Compacting d95f9bc33a874272929a659acd09837c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732530527169 2024-11-25T10:29:49,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741845_1021 (size=93) 2024-11-25T10:29:49,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741845_1021 (size=93) 2024-11-25T10:29:49,563 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] compactions.Compactor(225): Compacting 02ef13054db74e4d867dededfc413663, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732530541201 2024-11-25T10:29:49,563 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 is not closed yet, will try archiving it next time 2024-11-25T10:29:49,564 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] compactions.Compactor(225): Compacting 427dd55d7abb47e7a61d174c4abe8517, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732530556243 2024-11-25T10:29:49,564 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530584511 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs/e2fd862922a0%2C37987%2C1732530514773.1732530584511 2024-11-25T10:29:49,565 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46043:46043),(127.0.0.1/127.0.0.1:38625:38625)] 2024-11-25T10:29:49,565 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 is not closed yet, will try archiving it next time 2024-11-25T10:29:49,565 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
e2fd862922a0%2C37987%2C1732530514773.1732530589565 2024-11-25T10:29:49,574 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,574 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:29:49,574 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530589552 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530589565 2024-11-25T10:29:49,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741846_1022 (size=1258) 2024-11-25T10:29:49,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741846_1022 (size=1258) 2024-11-25T10:29:49,578 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 is not closed yet, will try archiving it next time 2024-11-25T10:29:49,583 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38625:38625),(127.0.0.1/127.0.0.1:46043:46043)] 2024-11-25T10:29:49,584 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 is not closed yet, will try archiving it next time 2024-11-25T10:29:49,601 INFO [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1e03c635290cb3529695c45e3af34eeb#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:29:49,602 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/3c7ec7603c8844e1b82351638166f0c9 is 1080, key is row0001/info:/1732530527169/Put/seqid=0 2024-11-25T10:29:49,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741848_1024 (size=27710) 2024-11-25T10:29:49,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741848_1024 (size=27710) 2024-11-25T10:29:49,621 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/3c7ec7603c8844e1b82351638166f0c9 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/3c7ec7603c8844e1b82351638166f0c9 2024-11-25T10:29:49,639 INFO [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1e03c635290cb3529695c45e3af34eeb/info of 1e03c635290cb3529695c45e3af34eeb into 3c7ec7603c8844e1b82351638166f0c9(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:29:49,639 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1e03c635290cb3529695c45e3af34eeb: 2024-11-25T10:29:49,641 INFO [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb., storeName=1e03c635290cb3529695c45e3af34eeb/info, priority=13, startTime=1732530589552; duration=0sec 2024-11-25T10:29:49,641 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T10:29:49,641 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:29:49,641 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/3c7ec7603c8844e1b82351638166f0c9 because midkey is the same as first or last row 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/3c7ec7603c8844e1b82351638166f0c9 because midkey is the same as first or last row 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/3c7ec7603c8844e1b82351638166f0c9 because midkey is the same as first or last row 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:29:49,642 DEBUG [RS:0;e2fd862922a0:37987-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1e03c635290cb3529695c45e3af34eeb:info 2024-11-25T10:29:49,955 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/WALs/e2fd862922a0,37987,1732530514773/e2fd862922a0%2C37987%2C1732530514773.1732530574501 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs/e2fd862922a0%2C37987%2C1732530514773.1732530574501 2024-11-25T10:30:01,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37987 {}] regionserver.HRegion(8855): Flush requested on 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:30:01,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1e03c635290cb3529695c45e3af34eeb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:30:01,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/907b992a45e244c4828bcbbc5ad93caa is 1080, key is row0022/info:/1732530589566/Put/seqid=0 2024-11-25T10:30:01,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741849_1025 (size=12509) 2024-11-25T10:30:01,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741849_1025 (size=12509) 2024-11-25T10:30:01,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/907b992a45e244c4828bcbbc5ad93caa 2024-11-25T10:30:01,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/907b992a45e244c4828bcbbc5ad93caa as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/907b992a45e244c4828bcbbc5ad93caa 2024-11-25T10:30:01,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/907b992a45e244c4828bcbbc5ad93caa, entries=7, sequenceid=42, filesize=12.2 K 2024-11-25T10:30:01,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1e03c635290cb3529695c45e3af34eeb in 52ms, sequenceid=42, compaction requested=false 2024-11-25T10:30:01,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1e03c635290cb3529695c45e3af34eeb: 2024-11-25T10:30:01,644 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-25T10:30:01,644 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:01,644 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/3c7ec7603c8844e1b82351638166f0c9 because midkey is the same as first or last row 2024-11-25T10:30:02,831 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T10:30:07,490 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1e03c635290cb3529695c45e3af34eeb, had cached 0 bytes from a total of 40219 2024-11-25T10:30:09,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T10:30:09,604 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
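Every memstore flush in this run closes with a "Finished flush ... in Nms, sequenceid=S" summary; the durations seen here range from 52 ms up to the 10044 ms flush that sat behind the slow syncs. The sketch below, under the same hypothetical-file assumption as the earlier snippets, lists data size and duration per sequence id by matching that summary text.

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class FlushDurations {
    // Matches "... Finished flush of dataSize ~7.36 KB/7532, ... in 140ms, sequenceid=11 ..." summaries.
    private static final Pattern FLUSH =
            Pattern.compile("Finished flush of dataSize ~\\S+ KB/(\\d+),.*? in (\\d+)ms, sequenceid=(\\d+)");

    public static void main(String[] args) throws Exception {
        List<String> lines = Files.readAllLines(Paths.get(args.length > 0 ? args[0] : "test-run.log"));
        for (String line : lines) {
            Matcher m = FLUSH.matcher(line);
            while (m.find()) {
                System.out.printf("seqid=%s dataSize=%s bytes duration=%s ms%n",
                        m.group(3), m.group(1), m.group(2));
            }
        }
    }
}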
2024-11-25T10:30:09,604 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:09,611 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:09,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:09,612 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-25T10:30:09,612 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T10:30:09,612 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=350154228, stopped=false 2024-11-25T10:30:09,613 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2fd862922a0,42855,1732530513841 2024-11-25T10:30:09,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:09,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:09,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:09,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:09,615 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:30:09,615 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T10:30:09,615 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:09,615 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:09,616 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:09,616 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,37987,1732530514773' ***** 2024-11-25T10:30:09,616 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:09,616 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T10:30:09,616 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T10:30:09,616 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T10:30:09,616 INFO [RS:0;e2fd862922a0:37987 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T10:30:09,617 INFO [RS:0;e2fd862922a0:37987 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T10:30:09,617 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(3091): Received CLOSE for 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:30:09,618 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,37987,1732530514773 2024-11-25T10:30:09,618 INFO [RS:0;e2fd862922a0:37987 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:30:09,618 INFO [RS:0;e2fd862922a0:37987 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2fd862922a0:37987. 
2024-11-25T10:30:09,618 DEBUG [RS:0;e2fd862922a0:37987 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:09,618 DEBUG [RS:0;e2fd862922a0:37987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:09,618 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1e03c635290cb3529695c45e3af34eeb, disabling compactions & flushes 2024-11-25T10:30:09,618 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T10:30:09,619 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:30:09,619 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:30:09,619 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:30:09,619 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T10:30:09,619 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. after waiting 0 ms 2024-11-25T10:30:09,619 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 
2024-11-25T10:30:09,619 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T10:30:09,619 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1e03c635290cb3529695c45e3af34eeb 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-25T10:30:09,619 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T10:30:09,619 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:30:09,619 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1325): Online Regions={1e03c635290cb3529695c45e3af34eeb=TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb., 1588230740=hbase:meta,,1.1588230740} 2024-11-25T10:30:09,619 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:30:09,620 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:30:09,620 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:30:09,620 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:30:09,620 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1e03c635290cb3529695c45e3af34eeb 2024-11-25T10:30:09,620 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-25T10:30:09,628 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/3cd7a1a4ac284bd1b5b3df43843e2a70 is 1080, key is row0029/info:/1732530603593/Put/seqid=0 2024-11-25T10:30:09,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741850_1026 (size=8193) 2024-11-25T10:30:09,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741850_1026 (size=8193) 2024-11-25T10:30:09,641 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/3cd7a1a4ac284bd1b5b3df43843e2a70 2024-11-25T10:30:09,653 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/.tmp/info/3cd7a1a4ac284bd1b5b3df43843e2a70 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/3cd7a1a4ac284bd1b5b3df43843e2a70 2024-11-25T10:30:09,654 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/info/14191c44acff4502a21604380746b055 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb./info:regioninfo/1732530517517/Put/seqid=0 2024-11-25T10:30:09,663 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/3cd7a1a4ac284bd1b5b3df43843e2a70, entries=3, sequenceid=48, filesize=8.0 K 2024-11-25T10:30:09,665 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1e03c635290cb3529695c45e3af34eeb in 46ms, sequenceid=48, compaction requested=true 2024-11-25T10:30:09,666 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c, hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/02ef13054db74e4d867dededfc413663, hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/427dd55d7abb47e7a61d174c4abe8517] to archive 2024-11-25T10:30:09,670 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
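The "Committing <file under .tmp> as <file under the column-family directory>" entries above reflect a write-to-temporary-then-move commit pattern: the flushed HFile is written completely under the region's .tmp directory and only then promoted to its final location. The sketch below is a generic illustration of that pattern using the Hadoop FileSystem API, not HBase's HRegionFileSystem code; the paths are placeholders shaped like the logged ones.

    // Generic illustration of the .tmp-then-promote commit pattern suggested by
    // the "Committing ... as ..." log entries. Not the HBase implementation.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      // Promote a fully written temporary file to its final store location.
      public static void commit(FileSystem fs, Path tmpFile, Path finalFile) throws IOException {
        if (!fs.rename(tmpFile, finalFile)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
      }

      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Placeholder paths in the same shape as the logged ones.
        commit(fs,
            new Path("/data/default/T/region/.tmp/info/hfile"),
            new Path("/data/default/T/region/info/hfile"));
      }
    }

Writing under .tmp first means a reader never sees a partially written HFile in the live column-family directory; whether the promotion is literally a FileSystem.rename in HBase is an assumption here, as the log only records the commit of one path as another.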
2024-11-25T10:30:09,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741851_1027 (size=7016) 2024-11-25T10:30:09,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741851_1027 (size=7016) 2024-11-25T10:30:09,675 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/d95f9bc33a874272929a659acd09837c 2024-11-25T10:30:09,677 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/info/14191c44acff4502a21604380746b055 2024-11-25T10:30:09,678 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/02ef13054db74e4d867dededfc413663 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/02ef13054db74e4d867dededfc413663 2024-11-25T10:30:09,680 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/427dd55d7abb47e7a61d174c4abe8517 to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/archive/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/info/427dd55d7abb47e7a61d174c4abe8517 2024-11-25T10:30:09,694 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e2fd862922a0:42855 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T10:30:09,700 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [d95f9bc33a874272929a659acd09837c=12509, 02ef13054db74e4d867dededfc413663=12509, 427dd55d7abb47e7a61d174c4abe8517=12509] 2024-11-25T10:30:09,707 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/default/TestLogRolling-testSlowSyncLogRolling/1e03c635290cb3529695c45e3af34eeb/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-25T10:30:09,711 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 2024-11-25T10:30:09,711 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1e03c635290cb3529695c45e3af34eeb: Waiting for close lock at 1732530609618Running coprocessor pre-close hooks at 1732530609618Disabling compacts and flushes for region at 1732530609618Disabling writes for close at 1732530609619 (+1 ms)Obtaining lock to block concurrent updates at 1732530609619Preparing flush snapshotting stores in 1e03c635290cb3529695c45e3af34eeb at 1732530609619Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732530609619Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. at 1732530609621 (+2 ms)Flushing 1e03c635290cb3529695c45e3af34eeb/info: creating writer at 1732530609621Flushing 1e03c635290cb3529695c45e3af34eeb/info: appending metadata at 1732530609628 (+7 ms)Flushing 1e03c635290cb3529695c45e3af34eeb/info: closing flushed file at 1732530609628Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6173091e: reopening flushed file at 1732530609652 (+24 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1e03c635290cb3529695c45e3af34eeb in 46ms, sequenceid=48, compaction requested=true at 1732530609665 (+13 ms)Writing region close event to WAL at 1732530609701 (+36 ms)Running coprocessor post-close hooks at 1732530609708 (+7 ms)Closed at 1732530609711 (+3 ms) 2024-11-25T10:30:09,711 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/ns/29a5e2d1e59e4076aefa23be295004fc is 43, key is default/ns:d/1732530516820/Put/seqid=0 2024-11-25T10:30:09,712 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732530517043.1e03c635290cb3529695c45e3af34eeb. 
2024-11-25T10:30:09,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741852_1028 (size=5153) 2024-11-25T10:30:09,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741852_1028 (size=5153) 2024-11-25T10:30:09,722 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/ns/29a5e2d1e59e4076aefa23be295004fc 2024-11-25T10:30:09,756 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/table/8d42fd5e55a244928132b5d5da1b1ae8 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732530517560/Put/seqid=0 2024-11-25T10:30:09,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741853_1029 (size=5396) 2024-11-25T10:30:09,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741853_1029 (size=5396) 2024-11-25T10:30:09,820 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T10:30:10,020 DEBUG [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T10:30:10,048 INFO [regionserver/e2fd862922a0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:30:10,067 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T10:30:10,068 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T10:30:10,165 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/table/8d42fd5e55a244928132b5d5da1b1ae8 2024-11-25T10:30:10,176 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/info/14191c44acff4502a21604380746b055 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/info/14191c44acff4502a21604380746b055 2024-11-25T10:30:10,185 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/info/14191c44acff4502a21604380746b055, entries=10, sequenceid=11, filesize=6.9 K 2024-11-25T10:30:10,187 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/ns/29a5e2d1e59e4076aefa23be295004fc as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/ns/29a5e2d1e59e4076aefa23be295004fc 2024-11-25T10:30:10,196 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/ns/29a5e2d1e59e4076aefa23be295004fc, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T10:30:10,198 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/.tmp/table/8d42fd5e55a244928132b5d5da1b1ae8 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/table/8d42fd5e55a244928132b5d5da1b1ae8 2024-11-25T10:30:10,206 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/table/8d42fd5e55a244928132b5d5da1b1ae8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T10:30:10,208 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 588ms, sequenceid=11, compaction requested=false 2024-11-25T10:30:10,215 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T10:30:10,216 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:30:10,216 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:10,216 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530609619Running coprocessor pre-close hooks at 1732530609619Disabling compacts and flushes for region at 1732530609619Disabling writes for close at 1732530609620 (+1 ms)Obtaining lock to block concurrent updates at 1732530609620Preparing flush snapshotting stores in 1588230740 at 1732530609620Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732530609621 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732530609622 (+1 ms)Flushing 1588230740/info: creating writer at 1732530609623 (+1 ms)Flushing 1588230740/info: appending metadata at 1732530609654 (+31 ms)Flushing 1588230740/info: closing flushed file at 1732530609654Flushing 1588230740/ns: creating writer at 1732530609687 (+33 ms)Flushing 1588230740/ns: appending metadata at 1732530609711 (+24 ms)Flushing 1588230740/ns: closing flushed file at 
1732530609711Flushing 1588230740/table: creating writer at 1732530609731 (+20 ms)Flushing 1588230740/table: appending metadata at 1732530609755 (+24 ms)Flushing 1588230740/table: closing flushed file at 1732530609755Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17a4b2fd: reopening flushed file at 1732530610174 (+419 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@768b358: reopening flushed file at 1732530610185 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32a580e9: reopening flushed file at 1732530610196 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 588ms, sequenceid=11, compaction requested=false at 1732530610208 (+12 ms)Writing region close event to WAL at 1732530610209 (+1 ms)Running coprocessor post-close hooks at 1732530610216 (+7 ms)Closed at 1732530610216 2024-11-25T10:30:10,216 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:10,221 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,37987,1732530514773; all regions closed. 2024-11-25T10:30:10,223 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,223 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,223 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,223 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741834_1010 (size=3066) 2024-11-25T10:30:10,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741834_1010 (size=3066) 2024-11-25T10:30:10,231 DEBUG [RS:0;e2fd862922a0:37987 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs 2024-11-25T10:30:10,231 INFO [RS:0;e2fd862922a0:37987 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C37987%2C1732530514773.meta:.meta(num 1732530516636) 2024-11-25T10:30:10,231 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,231 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,232 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,232 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,232 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741847_1023 (size=12695) 2024-11-25T10:30:10,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741847_1023 (size=12695) 2024-11-25T10:30:10,239 DEBUG [RS:0;e2fd862922a0:37987 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/oldWALs 2024-11-25T10:30:10,239 INFO [RS:0;e2fd862922a0:37987 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C37987%2C1732530514773:(num 1732530589565) 2024-11-25T10:30:10,239 DEBUG [RS:0;e2fd862922a0:37987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-25T10:30:10,239 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:30:10,240 INFO [RS:0;e2fd862922a0:37987 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:30:10,240 INFO [RS:0;e2fd862922a0:37987 {}] hbase.ChoreService(370): Chore service for: regionserver/e2fd862922a0:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T10:30:10,240 INFO [RS:0;e2fd862922a0:37987 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:30:10,240 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:30:10,241 INFO [RS:0;e2fd862922a0:37987 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37987 2024-11-25T10:30:10,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,37987,1732530514773 2024-11-25T10:30:10,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:30:10,246 INFO [RS:0;e2fd862922a0:37987 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:30:10,247 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,37987,1732530514773] 2024-11-25T10:30:10,249 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,37987,1732530514773 already deleted, retry=false 2024-11-25T10:30:10,249 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,37987,1732530514773 expired; onlineServers=0 2024-11-25T10:30:10,250 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2fd862922a0,42855,1732530513841' ***** 2024-11-25T10:30:10,250 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T10:30:10,250 INFO [M:0;e2fd862922a0:42855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:30:10,250 INFO [M:0;e2fd862922a0:42855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:30:10,250 DEBUG [M:0;e2fd862922a0:42855 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T10:30:10,250 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
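The NodeDeleted event on /hbase/rs/e2fd862922a0,37987,1732530514773 above is what lets the master's RegionServerTracker treat the region server as expired once its ephemeral znode disappears. A minimal sketch of watching such a deletion with the plain ZooKeeper client API follows; the quorum address and znode path are copied from the log, but the wiring is illustrative and is not HBase's ZKWatcher code.

    // Minimal sketch: register a one-shot watch and react to NodeDeleted,
    // in the spirit of the ZKWatcher/RegionServerTracker entries above.
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        String rsNode = "/hbase/rs/e2fd862922a0,37987,1732530514773";
        CountDownLatch deleted = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62959", 30_000, event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && rsNode.equals(event.getPath())) {
            // The master reacts to exactly this event to start expiration.
            deleted.countDown();
          }
        });
        // exists() with watch=true sets a one-shot watch using the default watcher.
        zk.exists(rsNode, true);
        deleted.await();
        zk.close();
      }
    }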
2024-11-25T10:30:10,250 DEBUG [M:0;e2fd862922a0:42855 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T10:30:10,250 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530515901 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530515901,5,FailOnTimeoutGroup] 2024-11-25T10:30:10,250 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530515900 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530515900,5,FailOnTimeoutGroup] 2024-11-25T10:30:10,250 INFO [M:0;e2fd862922a0:42855 {}] hbase.ChoreService(370): Chore service for: master/e2fd862922a0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T10:30:10,251 INFO [M:0;e2fd862922a0:42855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:30:10,251 DEBUG [M:0;e2fd862922a0:42855 {}] master.HMaster(1795): Stopping service threads 2024-11-25T10:30:10,251 INFO [M:0;e2fd862922a0:42855 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T10:30:10,251 INFO [M:0;e2fd862922a0:42855 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:30:10,251 INFO [M:0;e2fd862922a0:42855 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T10:30:10,251 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T10:30:10,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T10:30:10,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:10,252 DEBUG [M:0;e2fd862922a0:42855 {}] zookeeper.ZKUtil(347): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T10:30:10,252 WARN [M:0;e2fd862922a0:42855 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T10:30:10,253 INFO [M:0;e2fd862922a0:42855 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/.lastflushedseqids 2024-11-25T10:30:10,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741854_1030 (size=130) 2024-11-25T10:30:10,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741854_1030 (size=130) 2024-11-25T10:30:10,267 INFO [M:0;e2fd862922a0:42855 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T10:30:10,267 INFO [M:0;e2fd862922a0:42855 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T10:30:10,267 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:30:10,267 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:10,267 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:10,267 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:30:10,267 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:10,267 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-25T10:30:10,286 DEBUG [M:0;e2fd862922a0:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b95da91dfa1410a9fc5c1b64367a4ff is 82, key is hbase:meta,,1/info:regioninfo/1732530516724/Put/seqid=0 2024-11-25T10:30:10,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741855_1031 (size=5672) 2024-11-25T10:30:10,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741855_1031 (size=5672) 2024-11-25T10:30:10,294 INFO [M:0;e2fd862922a0:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b95da91dfa1410a9fc5c1b64367a4ff 2024-11-25T10:30:10,317 DEBUG [M:0;e2fd862922a0:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d11587f3085f43a6901c6ca2d0f58bdc is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732530517569/Put/seqid=0 2024-11-25T10:30:10,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741856_1032 (size=6247) 2024-11-25T10:30:10,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741856_1032 (size=6247) 2024-11-25T10:30:10,325 INFO [M:0;e2fd862922a0:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d11587f3085f43a6901c6ca2d0f58bdc 2024-11-25T10:30:10,331 INFO [M:0;e2fd862922a0:42855 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d11587f3085f43a6901c6ca2d0f58bdc 2024-11-25T10:30:10,348 DEBUG [M:0;e2fd862922a0:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f6ed1a44ec24782978d533556c996b5 is 69, key is e2fd862922a0,37987,1732530514773/rs:state/1732530515962/Put/seqid=0 2024-11-25T10:30:10,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:10,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37987-0x1014eab8e8d0001, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:10,349 INFO [RS:0;e2fd862922a0:37987 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:30:10,350 INFO [RS:0;e2fd862922a0:37987 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,37987,1732530514773; zookeeper connection closed. 2024-11-25T10:30:10,350 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@8359335 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@8359335 2024-11-25T10:30:10,351 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T10:30:10,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741857_1033 (size=5156) 2024-11-25T10:30:10,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741857_1033 (size=5156) 2024-11-25T10:30:10,354 INFO [M:0;e2fd862922a0:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f6ed1a44ec24782978d533556c996b5 2024-11-25T10:30:10,377 DEBUG [M:0;e2fd862922a0:42855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99706859f211440898daf8e1c18a2294 is 52, key is load_balancer_on/state:d/1732530517018/Put/seqid=0 2024-11-25T10:30:10,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741858_1034 (size=5056) 2024-11-25T10:30:10,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741858_1034 (size=5056) 2024-11-25T10:30:10,385 INFO [M:0;e2fd862922a0:42855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99706859f211440898daf8e1c18a2294 2024-11-25T10:30:10,393 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6b95da91dfa1410a9fc5c1b64367a4ff as 
hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6b95da91dfa1410a9fc5c1b64367a4ff 2024-11-25T10:30:10,400 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6b95da91dfa1410a9fc5c1b64367a4ff, entries=8, sequenceid=59, filesize=5.5 K 2024-11-25T10:30:10,401 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d11587f3085f43a6901c6ca2d0f58bdc as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d11587f3085f43a6901c6ca2d0f58bdc 2024-11-25T10:30:10,408 INFO [M:0;e2fd862922a0:42855 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d11587f3085f43a6901c6ca2d0f58bdc 2024-11-25T10:30:10,408 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d11587f3085f43a6901c6ca2d0f58bdc, entries=6, sequenceid=59, filesize=6.1 K 2024-11-25T10:30:10,410 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8f6ed1a44ec24782978d533556c996b5 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8f6ed1a44ec24782978d533556c996b5 2024-11-25T10:30:10,417 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8f6ed1a44ec24782978d533556c996b5, entries=1, sequenceid=59, filesize=5.0 K 2024-11-25T10:30:10,418 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/99706859f211440898daf8e1c18a2294 as hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/99706859f211440898daf8e1c18a2294 2024-11-25T10:30:10,425 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/99706859f211440898daf8e1c18a2294, entries=1, sequenceid=59, filesize=4.9 K 2024-11-25T10:30:10,427 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=59, compaction requested=false 2024-11-25T10:30:10,429 INFO [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T10:30:10,429 DEBUG [M:0;e2fd862922a0:42855 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530610267Disabling compacts and flushes for region at 1732530610267Disabling writes for close at 1732530610267Obtaining lock to block concurrent updates at 1732530610267Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732530610267Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732530610268 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732530610269 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732530610269Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732530610286 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732530610286Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732530610300 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732530610317 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732530610317Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732530610331 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732530610347 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732530610347Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732530610361 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732530610377 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732530610377Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4696dca2: reopening flushed file at 1732530610392 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21676a43: reopening flushed file at 1732530610400 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b633c70: reopening flushed file at 1732530610408 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c584a: reopening flushed file at 1732530610417 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 160ms, sequenceid=59, compaction requested=false at 1732530610427 (+10 ms)Writing region close event to WAL at 1732530610429 (+2 ms)Closed at 1732530610429 2024-11-25T10:30:10,430 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,430 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,430 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,430 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,431 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:10,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38111 is added to blk_1073741830_1006 (size=27973) 2024-11-25T10:30:10,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44819 is added to blk_1073741830_1006 (size=27973) 2024-11-25T10:30:10,434 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
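The epoch-millisecond markers in the close journal above can be cross-checked directly: 1732530610429 − 1732530610267 = 162 ms from "Waiting for close lock" to "Closed", consistent with the reported 160 ms flush, and the end marker converts back to the wall-clock timestamp in the log (the host clock appears to be UTC). A tiny check, with the two values copied from the journal:

    // Consistency check of the close-journal markers quoted above.
    import java.time.Instant;

    public class CloseJournalCheck {
      public static void main(String[] args) {
        long start = 1732530610267L; // "Waiting for close lock at ..."
        long end = 1732530610429L;   // "Closed at ..."
        System.out.println(end - start);               // 162 (ms) total close time
        System.out.println(Instant.ofEpochMilli(end)); // 2024-11-25T10:30:10.429Z
      }
    }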
2024-11-25T10:30:10,434 INFO [M:0;e2fd862922a0:42855 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T10:30:10,434 INFO [M:0;e2fd862922a0:42855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42855 2024-11-25T10:30:10,435 INFO [M:0;e2fd862922a0:42855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:30:10,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:10,538 INFO [M:0;e2fd862922a0:42855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:30:10,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42855-0x1014eab8e8d0000, quorum=127.0.0.1:62959, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:10,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:10,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:10,546 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:10,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:10,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:10,549 WARN [BP-190105748-172.17.0.2-1732530510466 heartbeating to localhost/127.0.0.1:39029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:10,549 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:30:10,549 WARN [BP-190105748-172.17.0.2-1732530510466 heartbeating to localhost/127.0.0.1:39029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-190105748-172.17.0.2-1732530510466 (Datanode Uuid e2ef4006-82b2-4d47-8672-8d6f9a5ba728) service to localhost/127.0.0.1:39029 2024-11-25T10:30:10,549 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:10,550 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data3/current/BP-190105748-172.17.0.2-1732530510466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:10,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data4/current/BP-190105748-172.17.0.2-1732530510466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:10,551 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:10,553 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:10,554 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:10,554 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:10,554 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:10,554 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:10,556 WARN [BP-190105748-172.17.0.2-1732530510466 heartbeating to localhost/127.0.0.1:39029 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:10,556 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:30:10,556 WARN [BP-190105748-172.17.0.2-1732530510466 heartbeating to localhost/127.0.0.1:39029 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-190105748-172.17.0.2-1732530510466 (Datanode Uuid 6c256c1f-3b9a-445c-a0e1-04d30c215f5e) service to localhost/127.0.0.1:39029 2024-11-25T10:30:10,556 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:10,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data1/current/BP-190105748-172.17.0.2-1732530510466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:10,557 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/cluster_ddd9d406-2133-59c1-e409-4ead031546ea/data/data2/current/BP-190105748-172.17.0.2-1732530510466 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:10,558 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:10,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:30:10,568 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:10,568 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:10,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:10,569 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:10,578 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T10:30:10,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T10:30:10,622 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e2fd862922a0:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/e2fd862922a0:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1d316e22 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39029 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39029 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39029 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:39029 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39029 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/e2fd862922a0:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=217 (was 217), ProcessCount=11 (was 11), AvailableMemoryMB=2279 (was 2098) - AvailableMemoryMB LEAK? - 2024-11-25T10:30:10,629 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=217, ProcessCount=11, AvailableMemoryMB=2278 2024-11-25T10:30:10,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.log.dir so I do NOT create it in target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29be0803-ae3f-1822-276d-33eec9fc78af/hadoop.tmp.dir so I do NOT create it in target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be, deleteOnExit=true 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/test.cache.data in system properties and HBase conf 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir in system properties and HBase conf 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T10:30:10,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T10:30:10,630 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T10:30:10,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:30:10,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:30:10,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T10:30:10,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:30:10,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T10:30:10,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T10:30:10,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:30:10,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:30:10,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T10:30:10,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/nfs.dump.dir in system properties and HBase conf 2024-11-25T10:30:10,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/java.io.tmpdir in system properties and HBase conf 2024-11-25T10:30:10,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:30:10,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T10:30:10,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T10:30:10,651 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:30:10,738 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:10,748 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:10,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:10,749 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:10,749 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:30:10,750 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:10,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:10,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:10,870 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/java.io.tmpdir/jetty-localhost-36201-hadoop-hdfs-3_4_1-tests_jar-_-any-14082130459030762646/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:30:10,871 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:36201} 2024-11-25T10:30:10,871 INFO [Time-limited test {}] server.Server(415): Started @102383ms 2024-11-25T10:30:10,887 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:30:10,982 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:10,987 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:10,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:10,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:10,988 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:30:10,989 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:10,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:11,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2152d149{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/java.io.tmpdir/jetty-localhost-34071-hadoop-hdfs-3_4_1-tests_jar-_-any-9553193148273924291/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:11,112 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:34071} 2024-11-25T10:30:11,112 INFO [Time-limited test {}] server.Server(415): Started @102624ms 2024-11-25T10:30:11,113 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:30:11,151 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:11,155 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:11,156 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:11,156 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:11,156 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:30:11,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:11,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:11,229 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data2/current/BP-1525056479-172.17.0.2-1732530610673/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:11,229 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data1/current/BP-1525056479-172.17.0.2-1732530610673/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:11,249 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:30:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c55b4c57027cc9b with lease ID 0xc709b8f014126f8: Processing first storage report for DS-a8507e7f-e557-4688-b792-75912b28ce0d from datanode DatanodeRegistration(127.0.0.1:33573, datanodeUuid=b5d36adb-d28f-4ca5-9ac1-3f15cb4352a6, infoPort=41621, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673) 2024-11-25T10:30:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c55b4c57027cc9b with lease ID 0xc709b8f014126f8: from storage DS-a8507e7f-e557-4688-b792-75912b28ce0d node DatanodeRegistration(127.0.0.1:33573, datanodeUuid=b5d36adb-d28f-4ca5-9ac1-3f15cb4352a6, infoPort=41621, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8c55b4c57027cc9b with lease ID 0xc709b8f014126f8: Processing first storage report for DS-61db8c7d-c10e-4c63-9458-0b6ea05642b2 from datanode DatanodeRegistration(127.0.0.1:33573, datanodeUuid=b5d36adb-d28f-4ca5-9ac1-3f15cb4352a6, infoPort=41621, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673) 2024-11-25T10:30:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c55b4c57027cc9b with lease ID 0xc709b8f014126f8: from storage DS-61db8c7d-c10e-4c63-9458-0b6ea05642b2 node DatanodeRegistration(127.0.0.1:33573, datanodeUuid=b5d36adb-d28f-4ca5-9ac1-3f15cb4352a6, infoPort=41621, infoSecurePort=0, ipcPort=39089, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:11,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6838bf55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/java.io.tmpdir/jetty-localhost-40473-hadoop-hdfs-3_4_1-tests_jar-_-any-13486653539852541333/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:11,280 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:40473} 2024-11-25T10:30:11,280 INFO [Time-limited test {}] server.Server(415): Started @102792ms 2024-11-25T10:30:11,282 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
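Editor's note: the datanode startup, Jetty webapp and block-report entries above come from an in-process HDFS mini cluster that the test harness boots before HBase itself. A minimal sketch of that setup with Hadoop's MiniDFSCluster test API is below; the datanode count of 2 and the single scratch directory mirror this log, but the standalone usage shown here is an assumption, since in this run the cluster is actually driven through HBaseTestingUtil.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep all namenode/datanode state under one scratch directory,
        // mirroring the test-data/<uuid> layout seen in the log above.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-scratch");

        // Two datanodes, matching the two datanode Jetty contexts logged above.
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        dfs.waitActive(); // wait until the datanodes have registered and sent block reports

        FileSystem fs = dfs.getFileSystem();
        System.out.println("mini HDFS running at " + fs.getUri());

        dfs.shutdown();
      }
    }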
2024-11-25T10:30:11,406 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data3/current/BP-1525056479-172.17.0.2-1732530610673/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:11,406 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data4/current/BP-1525056479-172.17.0.2-1732530610673/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:11,426 WARN [Thread-452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:30:11,430 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ff71f739060695f with lease ID 0xc709b8f014126f9: Processing first storage report for DS-089aefd4-56cc-4617-8edc-7150c560ec52 from datanode DatanodeRegistration(127.0.0.1:33657, datanodeUuid=1712f55e-eab1-43df-8985-3675dd251ec0, infoPort=37259, infoSecurePort=0, ipcPort=41035, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673) 2024-11-25T10:30:11,431 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ff71f739060695f with lease ID 0xc709b8f014126f9: from storage DS-089aefd4-56cc-4617-8edc-7150c560ec52 node DatanodeRegistration(127.0.0.1:33657, datanodeUuid=1712f55e-eab1-43df-8985-3675dd251ec0, infoPort=37259, infoSecurePort=0, ipcPort=41035, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:11,431 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ff71f739060695f with lease ID 0xc709b8f014126f9: Processing first storage report for DS-ff9fc601-1b2b-4537-998b-a1d67ec6852c from datanode DatanodeRegistration(127.0.0.1:33657, datanodeUuid=1712f55e-eab1-43df-8985-3675dd251ec0, infoPort=37259, infoSecurePort=0, ipcPort=41035, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673) 2024-11-25T10:30:11,431 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ff71f739060695f with lease ID 0xc709b8f014126f9: from storage DS-ff9fc601-1b2b-4537-998b-a1d67ec6852c node DatanodeRegistration(127.0.0.1:33657, datanodeUuid=1712f55e-eab1-43df-8985-3675dd251ec0, infoPort=37259, infoSecurePort=0, ipcPort=41035, storageInfo=lv=-57;cid=testClusterID;nsid=1414111723;c=1732530610673), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:11,512 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35 2024-11-25T10:30:11,516 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/zookeeper_0, clientPort=56926, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T10:30:11,517 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56926 2024-11-25T10:30:11,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:11,519 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:11,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:30:11,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:30:11,533 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb with version=8 2024-11-25T10:30:11,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase-staging 2024-11-25T10:30:11,536 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:30:11,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:11,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:11,536 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:30:11,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:11,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:30:11,536 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T10:30:11,537 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:30:11,537 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37613 2024-11-25T10:30:11,539 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37613 connecting to ZooKeeper ensemble=127.0.0.1:56926 2024-11-25T10:30:11,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:376130x0, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:30:11,545 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37613-0x1014ead0fcb0000 connected 2024-11-25T10:30:11,561 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:11,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:11,568 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:11,568 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb, hbase.cluster.distributed=false 2024-11-25T10:30:11,570 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:30:11,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37613 2024-11-25T10:30:11,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37613 2024-11-25T10:30:11,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37613 2024-11-25T10:30:11,573 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37613 2024-11-25T10:30:11,574 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37613 2024-11-25T10:30:11,591 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:30:11,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:11,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:11,592 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:30:11,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:11,592 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:30:11,592 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:30:11,592 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:30:11,593 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39071 2024-11-25T10:30:11,595 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39071 connecting to ZooKeeper ensemble=127.0.0.1:56926 2024-11-25T10:30:11,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:11,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:11,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390710x0, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:30:11,607 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:390710x0, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:11,607 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39071-0x1014ead0fcb0001 connected 2024-11-25T10:30:11,607 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:30:11,612 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:30:11,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T10:30:11,614 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:30:11,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39071 2024-11-25T10:30:11,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39071 2024-11-25T10:30:11,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39071 2024-11-25T10:30:11,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39071 2024-11-25T10:30:11,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39071 2024-11-25T10:30:11,642 
DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2fd862922a0:37613 2024-11-25T10:30:11,643 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2fd862922a0,37613,1732530611536 2024-11-25T10:30:11,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:11,645 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:11,646 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2fd862922a0,37613,1732530611536 2024-11-25T10:30:11,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T10:30:11,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,649 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:30:11,650 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2fd862922a0,37613,1732530611536 from backup master directory 2024-11-25T10:30:11,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2fd862922a0,37613,1732530611536 2024-11-25T10:30:11,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:11,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:11,651 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
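Editor's note: the entries above — MiniZooKeeperCluster on clientPort=56926, the master and region server Netty RPC servers binding, and both processes connecting to the ZooKeeper ensemble and claiming the backup-masters znode — are the normal boot sequence of an HBase mini cluster. A sketch of how a test typically drives this end to end follows; it assumes the HBaseTestingUtil API in branch-3 mirrors the long-standing HBaseTestingUtility methods (startMiniCluster, createTable, shutdownMiniCluster), so treat the exact signatures as an assumption rather than a confirmed API.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Method names assumed to match the pre-3.0 HBaseTestingUtility API.
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Starts mini DFS, mini ZooKeeper and an HBase master + region server,
        // producing the kind of startup log shown above.
        util.startMiniCluster();
        try {
          Table t = util.createTable(TableName.valueOf("sketch"), Bytes.toBytes("info"));
          t.close();
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }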
2024-11-25T10:30:11,651 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2fd862922a0,37613,1732530611536 2024-11-25T10:30:11,657 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/hbase.id] with ID: 8e907214-f645-4754-ae06-706d2fa892d2 2024-11-25T10:30:11,657 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/.tmp/hbase.id 2024-11-25T10:30:11,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:30:11,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:30:11,665 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/.tmp/hbase.id]:[hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/hbase.id] 2024-11-25T10:30:11,679 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:11,679 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T10:30:11,681 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
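Editor's note: the two FSUtils entries above describe the usual HDFS publish pattern — write the cluster ID to a temporary file, then rename it into its final location so readers never observe a half-written file (rename is atomic on HDFS). A minimal sketch of that pattern with the plain Hadoop FileSystem API is below; the file names and helper are illustrative, not the ones FSUtils uses internally.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PublishIdSketch {
      /** Write content to a temporary file, then rename it into place. */
      static void publish(FileSystem fs, Path finalPath, String content) throws IOException {
        Path tmp = new Path(finalPath.getParent(), "." + finalPath.getName() + ".tmp");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, finalPath)) {
          throw new IOException("rename failed: " + tmp + " -> " + finalPath);
        }
      }

      public static void main(String[] args) throws Exception {
        // With a default Configuration this resolves to the local filesystem,
        // which is enough to demonstrate the write-then-rename sequence.
        FileSystem fs = FileSystem.get(new Configuration());
        publish(fs, new Path("/tmp/hbase.id.example"), UUID.randomUUID().toString());
      }
    }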
2024-11-25T10:30:11,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:30:11,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:30:11,691 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:30:11,692 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T10:30:11,692 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:30:11,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:30:11,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:30:11,702 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store 2024-11-25T10:30:11,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:30:11,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:30:11,711 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:11,711 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:30:11,711 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:11,711 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:11,711 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:30:11,711 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:11,711 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
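Editor's note: the long descriptor dump above (families info, proc, rs and state with their VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY and BLOCKSIZE settings) is what the master's local 'master:store' region is created from. To map those attributes back to the public API, here is a sketch of how an equivalent descriptor would be assembled with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; the table name is hypothetical, since 'master:store' is an internal table that user code never creates, and only two of the four families are shown.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family in the dump: 3 versions, ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // Mirrors 'proc' (and, analogously, 'rs'/'state'): single version, ROW bloom
        // filter, default 64 KB block size left unset.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build();

        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();

        System.out.println(td);
      }
    }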
2024-11-25T10:30:11,711 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530611711Disabling compacts and flushes for region at 1732530611711Disabling writes for close at 1732530611711Writing region close event to WAL at 1732530611711Closed at 1732530611711 2024-11-25T10:30:11,713 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/.initializing 2024-11-25T10:30:11,713 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/WALs/e2fd862922a0,37613,1732530611536 2024-11-25T10:30:11,716 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C37613%2C1732530611536, suffix=, logDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/WALs/e2fd862922a0,37613,1732530611536, archiveDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/oldWALs, maxLogs=10 2024-11-25T10:30:11,717 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C37613%2C1732530611536.1732530611716 2024-11-25T10:30:11,723 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/WALs/e2fd862922a0,37613,1732530611536/e2fd862922a0%2C37613%2C1732530611536.1732530611716 2024-11-25T10:30:11,723 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:41621:41621)] 2024-11-25T10:30:11,724 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:30:11,724 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:11,724 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,725 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,728 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T10:30:11,728 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:11,729 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,730 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T10:30:11,730 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:30:11,731 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,733 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T10:30:11,733 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:30:11,734 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,735 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T10:30:11,735 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,736 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:30:11,737 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,738 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,738 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,740 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,740 DEBUG [master/e2fd862922a0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,740 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T10:30:11,742 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:11,744 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:30:11,745 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757562, jitterRate=-0.036711186170578}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T10:30:11,746 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732530611725Initializing all the Stores at 1732530611726 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530611726Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530611726Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530611726Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530611726Cleaning up temporary data from old regions at 1732530611740 (+14 ms)Region opened successfully at 1732530611746 (+6 ms) 2024-11-25T10:30:11,746 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T10:30:11,751 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16e331c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:30:11,752 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T10:30:11,752 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T10:30:11,752 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T10:30:11,752 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T10:30:11,753 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T10:30:11,753 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T10:30:11,753 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T10:30:11,756 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T10:30:11,757 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T10:30:11,758 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T10:30:11,759 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T10:30:11,759 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T10:30:11,762 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T10:30:11,762 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T10:30:11,763 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T10:30:11,764 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T10:30:11,765 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T10:30:11,766 DEBUG 
[master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T10:30:11,768 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T10:30:11,769 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T10:30:11,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:11,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:11,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,773 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2fd862922a0,37613,1732530611536, sessionid=0x1014ead0fcb0000, setting cluster-up flag (Was=false) 2024-11-25T10:30:11,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,781 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T10:30:11,782 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,37613,1732530611536 2024-11-25T10:30:11,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:11,790 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T10:30:11,792 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,37613,1732530611536 2024-11-25T10:30:11,793 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T10:30:11,795 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:11,795 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T10:30:11,795 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T10:30:11,796 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2fd862922a0,37613,1732530611536 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T10:30:11,797 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:11,797 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:11,797 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:11,797 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:11,797 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2fd862922a0:0, corePoolSize=10, maxPoolSize=10 2024-11-25T10:30:11,797 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,797 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:30:11,798 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T10:30:11,798 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732530641798 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,799 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T10:30:11,800 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T10:30:11,800 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T10:30:11,800 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:11,800 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T10:30:11,800 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T10:30:11,800 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T10:30:11,800 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530611800,5,FailOnTimeoutGroup] 2024-11-25T10:30:11,800 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530611800,5,FailOnTimeoutGroup] 2024-11-25T10:30:11,800 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,800 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T10:30:11,801 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,801 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,801 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,801 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T10:30:11,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:30:11,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:30:11,812 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T10:30:11,812 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb 2024-11-25T10:30:11,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:30:11,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:30:11,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:11,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:30:11,824 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:30:11,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:11,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:30:11,826 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:30:11,826 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:11,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:30:11,829 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:30:11,829 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:11,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:30:11,831 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:30:11,831 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(746): ClusterId : 8e907214-f645-4754-ae06-706d2fa892d2 2024-11-25T10:30:11,831 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:11,831 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:30:11,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:11,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:30:11,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740 2024-11-25T10:30:11,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740 2024-11-25T10:30:11,834 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:30:11,834 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:30:11,835 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:30:11,835 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:30:11,835 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T10:30:11,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:30:11,837 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:30:11,838 DEBUG [RS:0;e2fd862922a0:39071 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@350ae9cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:30:11,839 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:30:11,840 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865140, jitterRate=0.10008282959461212}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:30:11,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732530611821Initializing all the Stores at 1732530611822 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530611822Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530611822Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530611822Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530611822Cleaning up temporary data from old regions at 1732530611835 (+13 ms)Region opened successfully at 1732530611842 (+7 ms) 2024-11-25T10:30:11,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:30:11,842 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:30:11,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:30:11,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
2024-11-25T10:30:11,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:30:11,843 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:11,843 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530611842Disabling compacts and flushes for region at 1732530611842Disabling writes for close at 1732530611842Writing region close event to WAL at 1732530611843 (+1 ms)Closed at 1732530611843 2024-11-25T10:30:11,845 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:11,845 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T10:30:11,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T10:30:11,847 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:30:11,849 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T10:30:11,854 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2fd862922a0:39071 2024-11-25T10:30:11,855 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:30:11,855 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:30:11,855 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T10:30:11,856 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,37613,1732530611536 with port=39071, startcode=1732530611591 2024-11-25T10:30:11,856 DEBUG [RS:0;e2fd862922a0:39071 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:30:11,859 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43275, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:30:11,860 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37613 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,39071,1732530611591 2024-11-25T10:30:11,861 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37613 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,39071,1732530611591 2024-11-25T10:30:11,864 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb 2024-11-25T10:30:11,864 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39899 2024-11-25T10:30:11,864 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:30:11,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:30:11,871 DEBUG [RS:0;e2fd862922a0:39071 {}] zookeeper.ZKUtil(111): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,39071,1732530611591 2024-11-25T10:30:11,871 WARN [RS:0;e2fd862922a0:39071 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:30:11,872 INFO [RS:0;e2fd862922a0:39071 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:30:11,872 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,39071,1732530611591] 2024-11-25T10:30:11,872 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/WALs/e2fd862922a0,39071,1732530611591 2024-11-25T10:30:11,878 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:30:11,883 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:30:11,884 INFO [RS:0;e2fd862922a0:39071 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:30:11,885 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:11,886 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:30:11,887 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:30:11,888 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,888 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,889 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:11,889 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:30:11,889 DEBUG [RS:0;e2fd862922a0:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:30:11,896 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:11,897 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,897 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,897 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,897 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,897 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,39071,1732530611591-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:30:11,916 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:30:11,917 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,39071,1732530611591-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,917 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,917 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.Replication(171): e2fd862922a0,39071,1732530611591 started 2024-11-25T10:30:11,936 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:11,936 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,39071,1732530611591, RpcServer on e2fd862922a0/172.17.0.2:39071, sessionid=0x1014ead0fcb0001 2024-11-25T10:30:11,936 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:30:11,936 DEBUG [RS:0;e2fd862922a0:39071 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,39071,1732530611591 2024-11-25T10:30:11,936 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,39071,1732530611591' 2024-11-25T10:30:11,936 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:30:11,937 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:30:11,938 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:30:11,938 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:30:11,938 DEBUG [RS:0;e2fd862922a0:39071 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2fd862922a0,39071,1732530611591 2024-11-25T10:30:11,938 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,39071,1732530611591' 2024-11-25T10:30:11,938 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:30:11,938 DEBUG 
[RS:0;e2fd862922a0:39071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:30:11,939 DEBUG [RS:0;e2fd862922a0:39071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:30:11,939 INFO [RS:0;e2fd862922a0:39071 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:30:11,939 INFO [RS:0;e2fd862922a0:39071 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T10:30:11,999 WARN [e2fd862922a0:37613 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T10:30:12,041 INFO [RS:0;e2fd862922a0:39071 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C39071%2C1732530611591, suffix=, logDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/WALs/e2fd862922a0,39071,1732530611591, archiveDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/oldWALs, maxLogs=32 2024-11-25T10:30:12,044 INFO [RS:0;e2fd862922a0:39071 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C39071%2C1732530611591.1732530612043 2024-11-25T10:30:12,063 INFO [RS:0;e2fd862922a0:39071 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/WALs/e2fd862922a0,39071,1732530611591/e2fd862922a0%2C39071%2C1732530611591.1732530612043 2024-11-25T10:30:12,065 DEBUG [RS:0;e2fd862922a0:39071 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:41621:41621)] 2024-11-25T10:30:12,250 DEBUG [e2fd862922a0:37613 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T10:30:12,250 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2fd862922a0,39071,1732530611591 2024-11-25T10:30:12,252 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,39071,1732530611591, state=OPENING 2024-11-25T10:30:12,254 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T10:30:12,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:12,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:12,256 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:12,257 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:30:12,257 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:12,257 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,39071,1732530611591}] 2024-11-25T10:30:12,411 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T10:30:12,413 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59665, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T10:30:12,417 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T10:30:12,417 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:30:12,420 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C39071%2C1732530611591.meta, suffix=.meta, logDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/WALs/e2fd862922a0,39071,1732530611591, archiveDir=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/oldWALs, maxLogs=32 2024-11-25T10:30:12,422 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C39071%2C1732530611591.meta.1732530612422.meta 2024-11-25T10:30:12,429 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/WALs/e2fd862922a0,39071,1732530611591/e2fd862922a0%2C39071%2C1732530611591.meta.1732530612422.meta 2024-11-25T10:30:12,431 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37259:37259),(127.0.0.1/127.0.0.1:41621:41621)] 2024-11-25T10:30:12,431 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:30:12,432 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T10:30:12,432 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T10:30:12,432 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T10:30:12,432 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T10:30:12,432 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:12,432 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T10:30:12,433 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T10:30:12,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:30:12,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:30:12,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:12,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:12,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:30:12,437 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:30:12,437 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:12,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:12,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:30:12,438 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:30:12,438 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:12,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:12,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:30:12,440 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:30:12,440 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:12,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T10:30:12,441 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:30:12,441 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740 2024-11-25T10:30:12,443 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740 2024-11-25T10:30:12,444 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:30:12,444 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:30:12,445 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:30:12,446 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:30:12,447 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=869726, jitterRate=0.10591411590576172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:30:12,447 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T10:30:12,448 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732530612433Writing region info on filesystem at 1732530612433Initializing all the Stores at 1732530612434 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530612434Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530612434Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530612434Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530612434Cleaning up temporary data from old regions at 1732530612444 (+10 ms)Running coprocessor post-open hooks at 1732530612448 (+4 ms)Region opened successfully at 1732530612448 2024-11-25T10:30:12,450 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732530612410 2024-11-25T10:30:12,453 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T10:30:12,453 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T10:30:12,454 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,39071,1732530611591 2024-11-25T10:30:12,455 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,39071,1732530611591, state=OPEN 2024-11-25T10:30:12,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:30:12,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:30:12,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:12,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:12,460 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2fd862922a0,39071,1732530611591 2024-11-25T10:30:12,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T10:30:12,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,39071,1732530611591 in 203 msec 2024-11-25T10:30:12,468 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T10:30:12,468 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 619 msec 2024-11-25T10:30:12,469 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:12,469 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T10:30:12,471 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:30:12,471 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,39071,1732530611591, seqNum=-1] 2024-11-25T10:30:12,471 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:30:12,473 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49223, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:30:12,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 685 msec 2024-11-25T10:30:12,481 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732530612481, completionTime=-1 2024-11-25T10:30:12,481 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T10:30:12,481 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T10:30:12,483 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T10:30:12,483 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732530672483 2024-11-25T10:30:12,483 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732530732483 2024-11-25T10:30:12,484 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T10:30:12,484 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37613,1732530611536-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:12,484 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37613,1732530611536-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:12,484 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37613,1732530611536-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:12,484 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2fd862922a0:37613, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:12,485 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:12,485 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:12,486 DEBUG [master/e2fd862922a0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.838sec 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37613,1732530611536-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:30:12,490 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37613,1732530611536-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T10:30:12,493 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T10:30:12,493 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T10:30:12,493 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,37613,1732530611536-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:12,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bf88cd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:30:12,531 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2fd862922a0,37613,-1 for getting cluster id 2024-11-25T10:30:12,532 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T10:30:12,534 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e907214-f645-4754-ae06-706d2fa892d2' 2024-11-25T10:30:12,535 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T10:30:12,535 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e907214-f645-4754-ae06-706d2fa892d2" 2024-11-25T10:30:12,536 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b20cc47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:30:12,536 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2fd862922a0,37613,-1] 2024-11-25T10:30:12,536 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T10:30:12,537 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:12,539 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T10:30:12,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ffb657f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:30:12,541 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:30:12,543 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,39071,1732530611591, seqNum=-1] 2024-11-25T10:30:12,543 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:30:12,546 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54842, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:30:12,548 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2fd862922a0,37613,1732530611536 2024-11-25T10:30:12,549 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:12,554 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T10:30:12,554 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T10:30:12,554 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T10:30:12,555 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:12,555 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:12,555 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:12,555 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T10:30:12,555 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T10:30:12,556 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1038264842, stopped=false 2024-11-25T10:30:12,556 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2fd862922a0,37613,1732530611536 2024-11-25T10:30:12,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:12,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:12,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:12,558 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:30:12,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:12,558 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T10:30:12,558 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:12,558 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:12,558 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:12,558 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,39071,1732530611591' ***** 2024-11-25T10:30:12,559 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T10:30:12,559 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:12,559 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T10:30:12,559 INFO [RS:0;e2fd862922a0:39071 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T10:30:12,559 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T10:30:12,559 INFO [RS:0;e2fd862922a0:39071 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T10:30:12,559 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,39071,1732530611591 2024-11-25T10:30:12,559 INFO [RS:0;e2fd862922a0:39071 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:30:12,559 INFO [RS:0;e2fd862922a0:39071 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2fd862922a0:39071. 2024-11-25T10:30:12,560 DEBUG [RS:0;e2fd862922a0:39071 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:12,560 DEBUG [RS:0;e2fd862922a0:39071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:12,560 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-25T10:30:12,560 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:30:12,560 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T10:30:12,560 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T10:30:12,560 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T10:30:12,560 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-25T10:30:12,560 DEBUG [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T10:30:12,560 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:30:12,561 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:30:12,561 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:30:12,561 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:30:12,561 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:30:12,561 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-25T10:30:12,589 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740/.tmp/ns/df720c71dc3b42aba8b3e495710f0a22 is 43, key is default/ns:d/1732530612474/Put/seqid=0 2024-11-25T10:30:12,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741835_1011 (size=5153) 2024-11-25T10:30:12,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741835_1011 (size=5153) 2024-11-25T10:30:12,600 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740/.tmp/ns/df720c71dc3b42aba8b3e495710f0a22 2024-11-25T10:30:12,611 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740/.tmp/ns/df720c71dc3b42aba8b3e495710f0a22 as hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740/ns/df720c71dc3b42aba8b3e495710f0a22 2024-11-25T10:30:12,621 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740/ns/df720c71dc3b42aba8b3e495710f0a22, entries=2, sequenceid=6, filesize=5.0 K 2024-11-25T10:30:12,622 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 61ms, sequenceid=6, compaction requested=false 2024-11-25T10:30:12,622 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T10:30:12,629 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-25T10:30:12,630 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:30:12,630 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:12,631 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530612560Running coprocessor pre-close hooks at 1732530612560Disabling compacts and flushes for region at 1732530612560Disabling writes for close at 1732530612561 (+1 ms)Obtaining lock to block concurrent updates at 1732530612561Preparing flush snapshotting stores in 1588230740 at 1732530612561Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732530612562 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732530612562Flushing 1588230740/ns: creating writer at 1732530612563 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732530612588 (+25 ms)Flushing 1588230740/ns: closing flushed file at 1732530612589 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@642a62bf: reopening flushed file at 1732530612610 (+21 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 61ms, sequenceid=6, compaction requested=false at 1732530612622 (+12 ms)Writing region close event to WAL at 1732530612624 (+2 ms)Running coprocessor post-close hooks at 1732530612630 (+6 ms)Closed at 1732530612630 2024-11-25T10:30:12,631 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:12,761 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,39071,1732530611591; all regions closed. 
2024-11-25T10:30:12,761 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,762 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,762 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,762 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741834_1010 (size=1152) 2024-11-25T10:30:12,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741834_1010 (size=1152) 2024-11-25T10:30:12,768 DEBUG [RS:0;e2fd862922a0:39071 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/oldWALs 2024-11-25T10:30:12,768 INFO [RS:0;e2fd862922a0:39071 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C39071%2C1732530611591.meta:.meta(num 1732530612422) 2024-11-25T10:30:12,768 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,769 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,769 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,769 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,769 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741833_1009 (size=93) 2024-11-25T10:30:12,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741833_1009 (size=93) 2024-11-25T10:30:12,774 DEBUG [RS:0;e2fd862922a0:39071 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/oldWALs 2024-11-25T10:30:12,774 INFO [RS:0;e2fd862922a0:39071 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C39071%2C1732530611591:(num 1732530612043) 2024-11-25T10:30:12,774 DEBUG [RS:0;e2fd862922a0:39071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:12,774 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:30:12,774 INFO [RS:0;e2fd862922a0:39071 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:30:12,774 INFO [RS:0;e2fd862922a0:39071 {}] hbase.ChoreService(370): Chore service for: regionserver/e2fd862922a0:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T10:30:12,775 INFO [RS:0;e2fd862922a0:39071 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:30:12,775 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T10:30:12,775 INFO [RS:0;e2fd862922a0:39071 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39071 2024-11-25T10:30:12,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:30:12,778 INFO [RS:0;e2fd862922a0:39071 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:30:12,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,39071,1732530611591 2024-11-25T10:30:12,780 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,39071,1732530611591] 2024-11-25T10:30:12,781 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,39071,1732530611591 already deleted, retry=false 2024-11-25T10:30:12,782 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,39071,1732530611591 expired; onlineServers=0 2024-11-25T10:30:12,782 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2fd862922a0,37613,1732530611536' ***** 2024-11-25T10:30:12,782 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T10:30:12,782 INFO [M:0;e2fd862922a0:37613 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:30:12,782 INFO [M:0;e2fd862922a0:37613 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:30:12,782 DEBUG [M:0;e2fd862922a0:37613 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T10:30:12,782 DEBUG [M:0;e2fd862922a0:37613 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T10:30:12,782 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T10:30:12,782 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530611800 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530611800,5,FailOnTimeoutGroup] 2024-11-25T10:30:12,782 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530611800 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530611800,5,FailOnTimeoutGroup] 2024-11-25T10:30:12,782 INFO [M:0;e2fd862922a0:37613 {}] hbase.ChoreService(370): Chore service for: master/e2fd862922a0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T10:30:12,782 INFO [M:0;e2fd862922a0:37613 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:30:12,782 DEBUG [M:0;e2fd862922a0:37613 {}] master.HMaster(1795): Stopping service threads 2024-11-25T10:30:12,782 INFO [M:0;e2fd862922a0:37613 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T10:30:12,783 INFO [M:0;e2fd862922a0:37613 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:30:12,783 INFO [M:0;e2fd862922a0:37613 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T10:30:12,783 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T10:30:12,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T10:30:12,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:12,785 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-25T10:30:12,785 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-25T10:30:12,786 INFO [M:0;e2fd862922a0:37613 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/.lastflushedseqids 2024-11-25T10:30:12,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741836_1012 (size=99) 2024-11-25T10:30:12,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741836_1012 (size=99) 2024-11-25T10:30:12,795 INFO [M:0;e2fd862922a0:37613 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T10:30:12,795 INFO [M:0;e2fd862922a0:37613 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T10:30:12,795 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:30:12,795 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(1755): Closing region 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:12,796 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:12,796 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:30:12,796 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:12,796 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-25T10:30:12,816 DEBUG [M:0;e2fd862922a0:37613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/588db0a0a55244e888d85b95d1e29375 is 82, key is hbase:meta,,1/info:regioninfo/1732530612454/Put/seqid=0 2024-11-25T10:30:12,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741837_1013 (size=5672) 2024-11-25T10:30:12,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741837_1013 (size=5672) 2024-11-25T10:30:12,823 INFO [M:0;e2fd862922a0:37613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/588db0a0a55244e888d85b95d1e29375 2024-11-25T10:30:12,845 DEBUG [M:0;e2fd862922a0:37613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1fc166a15d7d42148fc5779828d3c73f is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732530612480/Put/seqid=0 2024-11-25T10:30:12,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741838_1014 (size=5275) 2024-11-25T10:30:12,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741838_1014 (size=5275) 2024-11-25T10:30:12,851 INFO [M:0;e2fd862922a0:37613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1fc166a15d7d42148fc5779828d3c73f 2024-11-25T10:30:12,873 DEBUG [M:0;e2fd862922a0:37613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cb0d3c1bcb124744ba8a56bbc457f4bd is 69, key is e2fd862922a0,39071,1732530611591/rs:state/1732530611861/Put/seqid=0 2024-11-25T10:30:12,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741839_1015 (size=5156) 2024-11-25T10:30:12,878 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741839_1015 (size=5156) 2024-11-25T10:30:12,879 INFO [M:0;e2fd862922a0:37613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cb0d3c1bcb124744ba8a56bbc457f4bd 2024-11-25T10:30:12,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:12,880 INFO [RS:0;e2fd862922a0:39071 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:30:12,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1014ead0fcb0001, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:12,880 INFO [RS:0;e2fd862922a0:39071 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,39071,1732530611591; zookeeper connection closed. 2024-11-25T10:30:12,881 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3d6f305d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3d6f305d 2024-11-25T10:30:12,881 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T10:30:12,901 DEBUG [M:0;e2fd862922a0:37613 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9bb7edcdda284ddb9b58056017f9f232 is 52, key is load_balancer_on/state:d/1732530612552/Put/seqid=0 2024-11-25T10:30:12,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741840_1016 (size=5056) 2024-11-25T10:30:12,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741840_1016 (size=5056) 2024-11-25T10:30:12,907 INFO [M:0;e2fd862922a0:37613 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9bb7edcdda284ddb9b58056017f9f232 2024-11-25T10:30:12,914 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/588db0a0a55244e888d85b95d1e29375 as hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/588db0a0a55244e888d85b95d1e29375 2024-11-25T10:30:12,922 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/588db0a0a55244e888d85b95d1e29375, entries=8, sequenceid=29, filesize=5.5 K 2024-11-25T10:30:12,923 DEBUG [M:0;e2fd862922a0:37613 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1fc166a15d7d42148fc5779828d3c73f as hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1fc166a15d7d42148fc5779828d3c73f 2024-11-25T10:30:12,930 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1fc166a15d7d42148fc5779828d3c73f, entries=3, sequenceid=29, filesize=5.2 K 2024-11-25T10:30:12,931 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cb0d3c1bcb124744ba8a56bbc457f4bd as hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cb0d3c1bcb124744ba8a56bbc457f4bd 2024-11-25T10:30:12,938 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cb0d3c1bcb124744ba8a56bbc457f4bd, entries=1, sequenceid=29, filesize=5.0 K 2024-11-25T10:30:12,939 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9bb7edcdda284ddb9b58056017f9f232 as hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9bb7edcdda284ddb9b58056017f9f232 2024-11-25T10:30:12,946 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39899/user/jenkins/test-data/c426147b-a349-93db-2d1e-5081fccb00bb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9bb7edcdda284ddb9b58056017f9f232, entries=1, sequenceid=29, filesize=4.9 K 2024-11-25T10:30:12,947 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=29, compaction requested=false 2024-11-25T10:30:12,949 INFO [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:12,949 DEBUG [M:0;e2fd862922a0:37613 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530612795Disabling compacts and flushes for region at 1732530612795Disabling writes for close at 1732530612796 (+1 ms)Obtaining lock to block concurrent updates at 1732530612796Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732530612796Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732530612797 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732530612798 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732530612798Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732530612815 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732530612816 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732530612828 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732530612845 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732530612845Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732530612857 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732530612873 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732530612873Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732530612885 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732530612901 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732530612901Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e6fd25b: reopening flushed file at 1732530612913 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b8d6577: reopening flushed file at 1732530612922 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@243aab40: reopening flushed file at 1732530612930 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@505490e6: reopening flushed file at 1732530612938 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 151ms, sequenceid=29, compaction requested=false at 1732530612947 (+9 ms)Writing region close event to WAL at 1732530612949 (+2 ms)Closed at 1732530612949 2024-11-25T10:30:12,949 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,949 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,950 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,950 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,950 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:12,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33573 is added to blk_1073741830_1006 (size=10311) 2024-11-25T10:30:12,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33657 is added to blk_1073741830_1006 (size=10311) 2024-11-25T10:30:12,954 INFO [M:0;e2fd862922a0:37613 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T10:30:12,954 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T10:30:12,954 INFO [M:0;e2fd862922a0:37613 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37613 2024-11-25T10:30:12,954 INFO [M:0;e2fd862922a0:37613 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:30:13,056 INFO [M:0;e2fd862922a0:37613 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:30:13,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:13,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37613-0x1014ead0fcb0000, quorum=127.0.0.1:56926, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:13,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6838bf55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:13,059 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@254e0164{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:13,059 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:13,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c1be80f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:13,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274298f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:13,061 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:30:13,061 WARN [BP-1525056479-172.17.0.2-1732530610673 heartbeating to localhost/127.0.0.1:39899 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:13,061 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:13,061 WARN [BP-1525056479-172.17.0.2-1732530610673 heartbeating to localhost/127.0.0.1:39899 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1525056479-172.17.0.2-1732530610673 (Datanode Uuid 1712f55e-eab1-43df-8985-3675dd251ec0) service to localhost/127.0.0.1:39899 2024-11-25T10:30:13,062 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data3/current/BP-1525056479-172.17.0.2-1732530610673 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:13,062 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data4/current/BP-1525056479-172.17.0.2-1732530610673 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:13,063 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:13,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2152d149{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:13,065 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1d281952{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:13,065 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:13,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@eab7acc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:13,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@469dec96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:13,067 WARN [BP-1525056479-172.17.0.2-1732530610673 heartbeating to localhost/127.0.0.1:39899 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:13,067 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:30:13,067 WARN [BP-1525056479-172.17.0.2-1732530610673 heartbeating to localhost/127.0.0.1:39899 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1525056479-172.17.0.2-1732530610673 (Datanode Uuid b5d36adb-d28f-4ca5-9ac1-3f15cb4352a6) service to localhost/127.0.0.1:39899 2024-11-25T10:30:13,067 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:13,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data1/current/BP-1525056479-172.17.0.2-1732530610673 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:13,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/cluster_ad416049-3e59-7d3f-041e-ad37e75082be/data/data2/current/BP-1525056479-172.17.0.2-1732530610673 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:13,068 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:13,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b8ef2ff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:30:13,074 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10885b70{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:13,074 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:13,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f681677{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:13,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:13,081 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T10:30:13,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T10:30:13,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T10:30:13,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.log.dir so I do NOT create it in target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce 2024-11-25T10:30:13,097 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57641848-5bd4-cbe4-3975-c5ced6692a35/hadoop.tmp.dir so I do NOT create it in target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce 2024-11-25T10:30:13,097 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86, deleteOnExit=true 2024-11-25T10:30:13,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T10:30:13,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/test.cache.data in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T10:30:13,098 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:30:13,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/nfs.dump.dir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T10:30:13,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T10:30:13,114 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:30:13,194 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:13,199 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:13,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:13,200 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:13,201 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:30:13,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:13,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e7025d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:13,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17c48ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:13,319 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a640c70{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir/jetty-localhost-39693-hadoop-hdfs-3_4_1-tests_jar-_-any-16185603613935356102/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:30:13,319 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@26313eab{HTTP/1.1, (http/1.1)}{localhost:39693} 2024-11-25T10:30:13,320 INFO [Time-limited test {}] server.Server(415): Started @104832ms 2024-11-25T10:30:13,334 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:30:13,419 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:13,423 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:13,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:13,424 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:13,424 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:30:13,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bb4f47b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:13,425 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c3d2a60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:13,541 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@475f8022{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir/jetty-localhost-38979-hadoop-hdfs-3_4_1-tests_jar-_-any-2881485972787499268/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:13,541 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f8b6f27{HTTP/1.1, (http/1.1)}{localhost:38979} 2024-11-25T10:30:13,541 INFO [Time-limited test {}] server.Server(415): Started @105054ms 2024-11-25T10:30:13,543 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:30:13,576 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:13,579 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:13,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:13,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:13,580 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:30:13,581 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39a69c39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:13,581 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@198c3788{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:13,646 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data1/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:13,646 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data2/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:13,677 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:30:13,680 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f2c08dd82b91de1 with lease ID 0xaaa0c3a9855046d5: Processing first storage report for DS-9b717984-592f-4576-850b-6ea2f4aa1752 from datanode DatanodeRegistration(127.0.0.1:35051, datanodeUuid=22f4e76f-dcf4-4d8b-86b7-f74f3d6f9055, infoPort=41477, infoSecurePort=0, ipcPort=40229, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:13,680 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f2c08dd82b91de1 with lease ID 0xaaa0c3a9855046d5: from storage DS-9b717984-592f-4576-850b-6ea2f4aa1752 node DatanodeRegistration(127.0.0.1:35051, datanodeUuid=22f4e76f-dcf4-4d8b-86b7-f74f3d6f9055, infoPort=41477, infoSecurePort=0, ipcPort=40229, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:13,680 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f2c08dd82b91de1 with lease ID 0xaaa0c3a9855046d5: Processing first storage report for DS-c21e7b38-1c60-4a8a-bada-aea37fc0ef39 from datanode DatanodeRegistration(127.0.0.1:35051, datanodeUuid=22f4e76f-dcf4-4d8b-86b7-f74f3d6f9055, infoPort=41477, infoSecurePort=0, ipcPort=40229, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:13,680 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f2c08dd82b91de1 with lease ID 0xaaa0c3a9855046d5: from storage DS-c21e7b38-1c60-4a8a-bada-aea37fc0ef39 node DatanodeRegistration(127.0.0.1:35051, datanodeUuid=22f4e76f-dcf4-4d8b-86b7-f74f3d6f9055, infoPort=41477, infoSecurePort=0, ipcPort=40229, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:13,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@72e5785b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir/jetty-localhost-34703-hadoop-hdfs-3_4_1-tests_jar-_-any-7086018226902693252/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:13,706 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68370875{HTTP/1.1, (http/1.1)}{localhost:34703} 2024-11-25T10:30:13,707 INFO [Time-limited test {}] server.Server(415): Started @105219ms 2024-11-25T10:30:13,708 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
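[Editor's sketch] The "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}" record above, followed by the DFS/Jetty/DataNode startup, is the kind of output produced by an HBase test harness bringing up a single-node cluster. A minimal sketch of such a harness call is shown below; the class names (HBaseTestingUtil, StartMiniClusterOption) are taken from the log, but the exact builder usage is an assumption, not the actual test code.

```java
// Illustrative sketch only: a harness call that would emit the
// "Starting up minicluster" / "Minicluster is down" lines seen in this log.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the logged option: 1 master, 1 region server, 2 data nodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts DFS, ZooKeeper and HBase, as logged above
    try {
      // ... run test assertions against util.getConnection() here ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" record
    }
  }
}
```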
2024-11-25T10:30:13,826 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data3/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:13,826 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data4/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:13,844 WARN [Thread-671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:30:13,846 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9282f874b232091e with lease ID 0xaaa0c3a9855046d6: Processing first storage report for DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80 from datanode DatanodeRegistration(127.0.0.1:45247, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=39887, infoSecurePort=0, ipcPort=35705, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:13,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9282f874b232091e with lease ID 0xaaa0c3a9855046d6: from storage DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80 node DatanodeRegistration(127.0.0.1:45247, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=39887, infoSecurePort=0, ipcPort=35705, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:13,846 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9282f874b232091e with lease ID 0xaaa0c3a9855046d6: Processing first storage report for DS-1d3585b4-60ef-42d6-97bf-07ada3b99f50 from datanode DatanodeRegistration(127.0.0.1:45247, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=39887, infoSecurePort=0, ipcPort=35705, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:13,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9282f874b232091e with lease ID 0xaaa0c3a9855046d6: from storage DS-1d3585b4-60ef-42d6-97bf-07ada3b99f50 node DatanodeRegistration(127.0.0.1:45247, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=39887, infoSecurePort=0, ipcPort=35705, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:13,898 INFO [regionserver/e2fd862922a0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:30:13,937 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce 2024-11-25T10:30:13,940 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/zookeeper_0, clientPort=64111, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T10:30:13,941 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64111 2024-11-25T10:30:13,942 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:13,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:13,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:30:13,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:30:13,955 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56 with version=8 2024-11-25T10:30:13,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase-staging 2024-11-25T10:30:13,958 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:30:13,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:13,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:13,958 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:30:13,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:13,958 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:30:13,958 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, 
hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T10:30:13,958 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:30:13,959 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42939 2024-11-25T10:30:13,960 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42939 connecting to ZooKeeper ensemble=127.0.0.1:64111 2024-11-25T10:30:13,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429390x0, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:30:13,968 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42939-0x1014ead19430000 connected 2024-11-25T10:30:13,983 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:13,985 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:13,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:13,988 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56, hbase.cluster.distributed=false 2024-11-25T10:30:13,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:30:13,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42939 2024-11-25T10:30:13,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42939 2024-11-25T10:30:13,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42939 2024-11-25T10:30:13,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42939 2024-11-25T10:30:13,991 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42939 2024-11-25T10:30:14,008 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:30:14,008 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:14,008 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:14,008 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 
readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:30:14,008 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:14,008 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:30:14,008 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:30:14,008 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:30:14,009 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35207 2024-11-25T10:30:14,010 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35207 connecting to ZooKeeper ensemble=127.0.0.1:64111 2024-11-25T10:30:14,011 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:14,013 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:14,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:352070x0, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:30:14,018 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:352070x0, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:30:14,018 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35207-0x1014ead19430001 connected 2024-11-25T10:30:14,018 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:30:14,019 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:30:14,020 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T10:30:14,021 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:30:14,021 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35207 2024-11-25T10:30:14,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35207 2024-11-25T10:30:14,023 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35207 2024-11-25T10:30:14,026 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, 
numCallQueues=1, port=35207 2024-11-25T10:30:14,027 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35207 2024-11-25T10:30:14,039 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2fd862922a0:42939 2024-11-25T10:30:14,040 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:14,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:14,042 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T10:30:14,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,046 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:30:14,046 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2fd862922a0,42939,1732530613957 from backup master directory 2024-11-25T10:30:14,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:14,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:30:14,048 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
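[Editor's sketch] The ZKWatcher events above show the master registering under /hbase/backup-masters and then watching /hbase/master on the ensemble at 127.0.0.1:64111. The sketch below shows how one might inspect those znodes with the stock ZooKeeper client while the minicluster is up; the ensemble address and base znode come from the log, everything else is an assumption for demonstration.

```java
// Illustrative sketch only: inspect the /hbase znodes referenced by the
// watcher events above, using the plain ZooKeeper client API.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import java.util.concurrent.CountDownLatch;

public class ZkInspectSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64111", 30_000, new Watcher() {
      @Override public void process(WatchedEvent event) {
        // Same SyncConnected state the ZKWatcher records above report.
        if (event.getState() == Event.KeeperState.SyncConnected) {
          connected.countDown();
        }
      }
    });
    connected.await();
    // The master first registers under /hbase/backup-masters, then
    // /hbase/master and /hbase/running appear, as the events above show.
    System.out.println("children of /hbase: " + zk.getChildren("/hbase", false));
    System.out.println("/hbase/master exists: " + (zk.exists("/hbase/master", false) != null));
    zk.close();
  }
}
```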
2024-11-25T10:30:14,048 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,052 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/hbase.id] with ID: 2529e779-558e-4350-bb55-8d753068e9de 2024-11-25T10:30:14,052 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/.tmp/hbase.id 2024-11-25T10:30:14,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:30:14,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:30:14,059 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/.tmp/hbase.id]:[hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/hbase.id] 2024-11-25T10:30:14,072 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:14,073 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T10:30:14,074 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
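[Editor's sketch] The FSUtils records above describe writing the cluster ID to a temporary file (.tmp/hbase.id) and then moving it to its final location. That is the usual write-to-temp-then-rename pattern on HDFS, sketched below with the public Hadoop FileSystem API; the paths are placeholders and the real implementation lives in org.apache.hadoop.hbase.util.FSUtils, so this is only an approximation of what the log documents.

```java
// Illustrative sketch only: write-to-temp-then-rename for a cluster ID file,
// the pattern described by the FSUtils(620/625/634) records above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.nio.charset.StandardCharsets;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46735");          // NameNode address from the log
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id"); // hypothetical temp path
    Path dst = new Path("/user/jenkins/test-data/hbase.id");      // hypothetical final path

    // 1. Write the ID to a temporary file so readers never observe a half-written file.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("2529e779-558e-4350-bb55-8d753068e9de".getBytes(StandardCharsets.UTF_8));
    }
    // 2. Move it into place; HDFS rename is atomic within a directory.
    if (!fs.rename(tmp, dst)) {
      throw new IllegalStateException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```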
2024-11-25T10:30:14,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:30:14,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:30:14,094 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:30:14,096 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T10:30:14,096 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:30:14,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:30:14,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:30:14,107 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store 2024-11-25T10:30:14,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:30:14,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:30:14,117 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:14,117 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:30:14,117 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:14,117 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:14,117 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:30:14,117 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:30:14,117 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
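[Editor's sketch] The master:store table descriptor printed above lists four column families (info, proc, rs, state) with their attributes. The sketch below rebuilds the 'info' family with the public HBase client descriptor builders, using the attribute values copied from the log (VERSIONS=3, IN_MEMORY=true, BLOCKSIZE=8192, ROW_INDEX_V1 encoding, ROWCOL bloom filter); using the builder API here is an assumption for illustration, since the master constructs this descriptor internally.

```java
// Illustrative sketch only: a column-family layout equivalent to the 'info'
// family of the master:store descriptor printed above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .build();

    TableDescriptor masterStore = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(masterStore);
  }
}
```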
2024-11-25T10:30:14,117 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530614117Disabling compacts and flushes for region at 1732530614117Disabling writes for close at 1732530614117Writing region close event to WAL at 1732530614117Closed at 1732530614117 2024-11-25T10:30:14,118 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/.initializing 2024-11-25T10:30:14,118 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,122 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C42939%2C1732530613957, suffix=, logDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957, archiveDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/oldWALs, maxLogs=10 2024-11-25T10:30:14,123 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C42939%2C1732530613957.1732530614122 2024-11-25T10:30:14,135 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 2024-11-25T10:30:14,139 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41477:41477),(127.0.0.1/127.0.0.1:39887:39887)] 2024-11-25T10:30:14,144 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:30:14,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:14,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T10:30:14,149 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T10:30:14,151 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:30:14,152 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T10:30:14,154 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:30:14,154 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,156 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T10:30:14,156 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:30:14,157 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,158 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,158 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,160 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,160 DEBUG [master/e2fd862922a0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,160 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T10:30:14,162 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:30:14,165 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:30:14,165 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727357, jitterRate=-0.07511819899082184}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T10:30:14,167 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732530614145Initializing all the Stores at 1732530614146 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530614146Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530614146Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530614146Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530614147 (+1 ms)Cleaning up temporary data from old regions at 1732530614160 (+13 ms)Region opened successfully at 1732530614167 (+7 ms) 2024-11-25T10:30:14,168 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T10:30:14,172 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b6b12b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:30:14,173 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T10:30:14,173 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T10:30:14,173 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T10:30:14,173 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T10:30:14,174 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T10:30:14,174 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T10:30:14,174 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T10:30:14,177 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T10:30:14,178 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T10:30:14,179 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T10:30:14,180 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T10:30:14,180 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T10:30:14,183 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T10:30:14,183 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T10:30:14,184 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T10:30:14,186 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T10:30:14,187 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T10:30:14,189 DEBUG 
[master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T10:30:14,191 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T10:30:14,192 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T10:30:14,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:14,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:14,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,196 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2fd862922a0,42939,1732530613957, sessionid=0x1014ead19430000, setting cluster-up flag (Was=false) 2024-11-25T10:30:14,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,205 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T10:30:14,207 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,214 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,222 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T10:30:14,224 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,226 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T10:30:14,230 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:14,231 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T10:30:14,231 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T10:30:14,231 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2fd862922a0,42939,1732530613957 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2fd862922a0:0, corePoolSize=10, maxPoolSize=10 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:30:14,233 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T10:30:14,234 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(746): ClusterId : 2529e779-558e-4350-bb55-8d753068e9de 2024-11-25T10:30:14,234 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:30:14,237 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:30:14,237 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:30:14,240 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:30:14,241 DEBUG [RS:0;e2fd862922a0:35207 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa3efaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:30:14,247 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732530644247 2024-11-25T10:30:14,247 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T10:30:14,247 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T10:30:14,247 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T10:30:14,248 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T10:30:14,248 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T10:30:14,248 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T10:30:14,248 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:14,248 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,248 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T10:30:14,249 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T10:30:14,249 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T10:30:14,249 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T10:30:14,250 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T10:30:14,250 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T10:30:14,250 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,250 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530614250,5,FailOnTimeoutGroup] 2024-11-25T10:30:14,250 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T10:30:14,252 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530614251,5,FailOnTimeoutGroup] 2024-11-25T10:30:14,252 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,252 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T10:30:14,252 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,253 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,260 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2fd862922a0:35207 2024-11-25T10:30:14,260 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:30:14,260 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:30:14,260 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-25T10:30:14,261 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,42939,1732530613957 with port=35207, startcode=1732530614007 2024-11-25T10:30:14,261 DEBUG [RS:0;e2fd862922a0:35207 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:30:14,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:30:14,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:30:14,268 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33067, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:30:14,268 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T10:30:14,268 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56 2024-11-25T10:30:14,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42939 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42939 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,273 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56 2024-11-25T10:30:14,273 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46735 2024-11-25T10:30:14,273 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:30:14,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:30:14,276 DEBUG [RS:0;e2fd862922a0:35207 {}] zookeeper.ZKUtil(111): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,276 WARN [RS:0;e2fd862922a0:35207 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-25T10:30:14,276 INFO [RS:0;e2fd862922a0:35207 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:30:14,277 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,278 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,35207,1732530614007] 2024-11-25T10:30:14,290 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:30:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:30:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:30:14,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:14,296 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:30:14,304 INFO [RS:0;e2fd862922a0:35207 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:30:14,305 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,308 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:30:14,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:30:14,309 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:30:14,310 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,310 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,311 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,311 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,311 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,311 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:30:14,311 DEBUG [RS:0;e2fd862922a0:35207 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:30:14,311 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 
2024-11-25T10:30:14,311 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,312 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,312 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,312 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,312 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,312 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,312 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35207,1732530614007-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:30:14,312 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:30:14,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:30:14,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:30:14,317 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:30:14,317 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:30:14,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:30:14,320 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:30:14,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740 2024-11-25T10:30:14,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740 2024-11-25T10:30:14,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:30:14,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:30:14,324 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No 
hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:30:14,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:30:14,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:30:14,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T10:30:14,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-25T10:30:14,329 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:30:14,329 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=831208, jitterRate=0.05693568289279938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:30:14,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732530614296Initializing all the Stores at 1732530614297 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530614297Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530614308 (+11 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530614309 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530614309Cleaning up temporary data from old regions at 1732530614323 (+14 ms)Region opened successfully at 1732530614330 (+7 ms) 2024-11-25T10:30:14,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:30:14,331 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:30:14,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:30:14,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:30:14,331 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:30:14,333 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:14,333 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530614331Disabling compacts and flushes for region at 1732530614331Disabling writes for close at 1732530614331Writing region close event to WAL at 1732530614332 (+1 ms)Closed at 1732530614332 2024-11-25T10:30:14,334 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:14,334 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T10:30:14,334 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T10:30:14,336 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:30:14,338 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T10:30:14,338 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:30:14,338 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35207,1732530614007-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,338 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,338 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.Replication(171): e2fd862922a0,35207,1732530614007 started 2024-11-25T10:30:14,361 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,362 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,35207,1732530614007, RpcServer on e2fd862922a0/172.17.0.2:35207, sessionid=0x1014ead19430001 2024-11-25T10:30:14,362 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:30:14,362 DEBUG [RS:0;e2fd862922a0:35207 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,362 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,35207,1732530614007' 2024-11-25T10:30:14,362 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:30:14,363 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:30:14,363 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:30:14,363 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:30:14,363 DEBUG [RS:0;e2fd862922a0:35207 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,363 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,35207,1732530614007' 2024-11-25T10:30:14,363 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:30:14,364 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:30:14,364 DEBUG [RS:0;e2fd862922a0:35207 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:30:14,364 INFO [RS:0;e2fd862922a0:35207 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:30:14,364 INFO [RS:0;e2fd862922a0:35207 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-25T10:30:14,467 INFO [RS:0;e2fd862922a0:35207 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C35207%2C1732530614007, suffix=, logDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007, archiveDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs, maxLogs=32 2024-11-25T10:30:14,468 INFO [RS:0;e2fd862922a0:35207 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.1732530614468 2024-11-25T10:30:14,475 INFO [RS:0;e2fd862922a0:35207 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 2024-11-25T10:30:14,476 DEBUG [RS:0;e2fd862922a0:35207 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39887:39887),(127.0.0.1/127.0.0.1:41477:41477)] 2024-11-25T10:30:14,488 DEBUG [e2fd862922a0:42939 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T10:30:14,489 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,491 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,35207,1732530614007, state=OPENING 2024-11-25T10:30:14,492 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T10:30:14,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,494 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:14,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:14,495 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:30:14,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:14,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,35207,1732530614007}] 2024-11-25T10:30:14,649 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T10:30:14,652 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40743, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T10:30:14,656 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T10:30:14,656 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:30:14,658 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C35207%2C1732530614007.meta, suffix=.meta, logDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007, archiveDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs, maxLogs=32 2024-11-25T10:30:14,659 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta 2024-11-25T10:30:14,664 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta 2024-11-25T10:30:14,665 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41477:41477),(127.0.0.1/127.0.0.1:39887:39887)] 2024-11-25T10:30:14,666 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:30:14,666 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T10:30:14,666 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T10:30:14,666 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T10:30:14,667 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T10:30:14,667 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:14,667 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T10:30:14,667 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T10:30:14,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:30:14,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:30:14,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:30:14,671 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:30:14,671 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:30:14,672 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:30:14,673 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:30:14,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:30:14,674 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:30:14,674 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,674 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T10:30:14,674 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:30:14,675 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740 2024-11-25T10:30:14,677 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740 2024-11-25T10:30:14,678 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:30:14,678 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:30:14,679 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:30:14,680 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:30:14,681 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865961, jitterRate=0.10112740099430084}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:30:14,681 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T10:30:14,682 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732530614667Writing region info on filesystem at 1732530614667Initializing all the Stores at 1732530614668 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530614668Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530614668Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530614668Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530614668Cleaning up temporary data from old regions at 1732530614678 (+10 ms)Running coprocessor post-open hooks at 1732530614681 (+3 ms)Region opened successfully at 1732530614682 (+1 ms) 2024-11-25T10:30:14,683 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732530614649 2024-11-25T10:30:14,686 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T10:30:14,686 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T10:30:14,687 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,688 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,35207,1732530614007, state=OPEN 2024-11-25T10:30:14,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:30:14,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:30:14,694 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2fd862922a0,35207,1732530614007 2024-11-25T10:30:14,694 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:14,694 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:30:14,697 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T10:30:14,697 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,35207,1732530614007 in 199 msec 2024-11-25T10:30:14,700 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T10:30:14,700 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 363 msec 2024-11-25T10:30:14,701 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:30:14,701 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T10:30:14,702 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:30:14,702 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,35207,1732530614007, seqNum=-1] 2024-11-25T10:30:14,703 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:30:14,704 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56931, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:30:14,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 480 msec 2024-11-25T10:30:14,710 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732530614710, completionTime=-1 2024-11-25T10:30:14,710 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T10:30:14,710 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T10:30:14,712 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:14,712 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T10:30:14,712 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732530674712 2024-11-25T10:30:14,712 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732530734712 2024-11-25T10:30:14,712 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T10:30:14,713 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42939,1732530613957-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,713 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42939,1732530613957-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,713 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42939,1732530613957-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,713 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2fd862922a0:42939, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,713 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,713 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,715 DEBUG [master/e2fd862922a0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.670sec 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42939,1732530613957-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:30:14,719 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42939,1732530613957-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T10:30:14,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:14,722 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T10:30:14,722 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T10:30:14,722 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,42939,1732530613957-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cae84f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:30:14,734 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2fd862922a0,42939,-1 for getting cluster id 2024-11-25T10:30:14,734 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T10:30:14,736 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2529e779-558e-4350-bb55-8d753068e9de' 2024-11-25T10:30:14,736 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T10:30:14,736 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2529e779-558e-4350-bb55-8d753068e9de" 2024-11-25T10:30:14,736 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2db3caf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:30:14,736 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2fd862922a0,42939,-1] 2024-11-25T10:30:14,737 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T10:30:14,737 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:14,739 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50890, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T10:30:14,740 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd7b3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:30:14,740 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:30:14,741 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,35207,1732530614007, seqNum=-1] 2024-11-25T10:30:14,741 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:30:14,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54072, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:30:14,744 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:14,748 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T10:30:14,789 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:30:14,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:14,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:14,789 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:30:14,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:30:14,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:30:14,789 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:30:14,789 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:30:14,790 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35123 2024-11-25T10:30:14,791 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35123 connecting to ZooKeeper ensemble=127.0.0.1:64111 2024-11-25T10:30:14,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:14,793 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:30:14,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351230x0, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:30:14,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:351230x0, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-25T10:30:14,798 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-25T10:30:14,798 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35123-0x1014ead19430002 connected 2024-11-25T10:30:14,798 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:30:14,799 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:30:14,800 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:30:14,801 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:30:14,804 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35123 2024-11-25T10:30:14,804 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35123 2024-11-25T10:30:14,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35123 2024-11-25T10:30:14,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35123 2024-11-25T10:30:14,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35123 2024-11-25T10:30:14,810 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(746): ClusterId : 2529e779-558e-4350-bb55-8d753068e9de 2024-11-25T10:30:14,810 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:30:14,812 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:30:14,812 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:30:14,814 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:30:14,814 DEBUG [RS:1;e2fd862922a0:35123 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@768daa7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:30:14,827 DEBUG [RS:1;e2fd862922a0:35123 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e2fd862922a0:35123 2024-11-25T10:30:14,827 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:30:14,827 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:30:14,827 DEBUG [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T10:30:14,828 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,42939,1732530613957 with port=35123, startcode=1732530614788 2024-11-25T10:30:14,828 DEBUG [RS:1;e2fd862922a0:35123 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:30:14,830 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43203, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:30:14,831 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42939 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,35123,1732530614788 2024-11-25T10:30:14,831 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42939 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,35123,1732530614788 2024-11-25T10:30:14,833 DEBUG [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56 2024-11-25T10:30:14,833 DEBUG [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46735 2024-11-25T10:30:14,833 DEBUG [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:30:14,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:30:14,839 DEBUG [RS:1;e2fd862922a0:35123 {}] zookeeper.ZKUtil(111): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,35123,1732530614788 2024-11-25T10:30:14,839 WARN [RS:1;e2fd862922a0:35123 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:30:14,839 INFO [RS:1;e2fd862922a0:35123 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:30:14,839 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,35123,1732530614788] 2024-11-25T10:30:14,839 DEBUG [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788 2024-11-25T10:30:14,843 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:30:14,845 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:30:14,845 INFO [RS:1;e2fd862922a0:35123 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:30:14,845 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,845 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:30:14,846 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:30:14,846 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,846 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:30:14,847 DEBUG [RS:1;e2fd862922a0:35123 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:30:14,847 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T10:30:14,847 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,847 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,847 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,848 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,848 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35123,1732530614788-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:30:14,862 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:30:14,862 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35123,1732530614788-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,863 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,863 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.Replication(171): e2fd862922a0,35123,1732530614788 started 2024-11-25T10:30:14,877 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:30:14,877 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,35123,1732530614788, RpcServer on e2fd862922a0/172.17.0.2:35123, sessionid=0x1014ead19430002 2024-11-25T10:30:14,877 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:30:14,877 DEBUG [RS:1;e2fd862922a0:35123 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,35123,1732530614788 2024-11-25T10:30:14,877 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,35123,1732530614788' 2024-11-25T10:30:14,877 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;e2fd862922a0:35123,5,FailOnTimeoutGroup] 2024-11-25T10:30:14,877 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:30:14,877 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-25T10:30:14,878 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:30:14,878 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T10:30:14,878 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:30:14,878 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:30:14,878 DEBUG [RS:1;e2fd862922a0:35123 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
e2fd862922a0,35123,1732530614788 2024-11-25T10:30:14,878 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,35123,1732530614788' 2024-11-25T10:30:14,878 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:30:14,879 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:30:14,879 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is e2fd862922a0,42939,1732530613957 2024-11-25T10:30:14,879 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@f44dd06 2024-11-25T10:30:14,879 DEBUG [RS:1;e2fd862922a0:35123 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:30:14,879 INFO [RS:1;e2fd862922a0:35123 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:30:14,879 INFO [RS:1;e2fd862922a0:35123 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T10:30:14,879 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T10:30:14,881 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50900, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T10:30:14,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42939 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T10:30:14,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42939 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-25T10:30:14,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42939 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:30:14,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42939 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T10:30:14,885 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T10:30:14,885 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:14,885 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42939 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-25T10:30:14,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42939 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:30:14,887 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T10:30:14,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741835_1011 (size=393) 2024-11-25T10:30:14,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741835_1011 (size=393) 2024-11-25T10:30:14,896 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4e397356122b717e0fcde1a373944b96, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56 2024-11-25T10:30:14,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45247 is added to blk_1073741836_1012 (size=76) 2024-11-25T10:30:14,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35051 is added to blk_1073741836_1012 (size=76) 2024-11-25T10:30:14,907 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:14,907 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 4e397356122b717e0fcde1a373944b96, disabling compactions & flushes 2024-11-25T10:30:14,907 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:14,907 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:14,907 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. after waiting 0 ms 2024-11-25T10:30:14,907 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:14,907 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:14,907 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4e397356122b717e0fcde1a373944b96: Waiting for close lock at 1732530614907Disabling compacts and flushes for region at 1732530614907Disabling writes for close at 1732530614907Writing region close event to WAL at 1732530614907Closed at 1732530614907 2024-11-25T10:30:14,909 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T10:30:14,909 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732530614909"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732530614909"}]},"ts":"1732530614909"} 2024-11-25T10:30:14,912 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-25T10:30:14,913 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T10:30:14,913 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530614913"}]},"ts":"1732530614913"} 2024-11-25T10:30:14,916 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-25T10:30:14,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4e397356122b717e0fcde1a373944b96, ASSIGN}] 2024-11-25T10:30:14,918 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4e397356122b717e0fcde1a373944b96, ASSIGN 2024-11-25T10:30:14,919 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4e397356122b717e0fcde1a373944b96, ASSIGN; state=OFFLINE, location=e2fd862922a0,35207,1732530614007; forceNewPlan=false, retain=false 2024-11-25T10:30:14,981 INFO [RS:1;e2fd862922a0:35123 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C35123%2C1732530614788, suffix=, logDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788, archiveDir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs, maxLogs=32 2024-11-25T10:30:14,982 INFO [RS:1;e2fd862922a0:35123 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35123%2C1732530614788.1732530614982 2024-11-25T10:30:14,988 INFO [RS:1;e2fd862922a0:35123 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 2024-11-25T10:30:14,989 DEBUG [RS:1;e2fd862922a0:35123 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41477:41477),(127.0.0.1/127.0.0.1:39887:39887)] 2024-11-25T10:30:15,070 INFO [e2fd862922a0:42939 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-25T10:30:15,070 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4e397356122b717e0fcde1a373944b96, regionState=OPENING, regionLocation=e2fd862922a0,35207,1732530614007 2024-11-25T10:30:15,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4e397356122b717e0fcde1a373944b96, ASSIGN because future has completed 2024-11-25T10:30:15,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e397356122b717e0fcde1a373944b96, server=e2fd862922a0,35207,1732530614007}] 2024-11-25T10:30:15,232 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:30:15,234 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:15,234 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4e397356122b717e0fcde1a373944b96, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:30:15,234 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:15,235 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,235 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:30:15,235 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,235 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,238 INFO [StoreOpener-4e397356122b717e0fcde1a373944b96-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,240 INFO [StoreOpener-4e397356122b717e0fcde1a373944b96-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4e397356122b717e0fcde1a373944b96 columnFamilyName info 2024-11-25T10:30:15,240 DEBUG [StoreOpener-4e397356122b717e0fcde1a373944b96-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:30:15,241 INFO [StoreOpener-4e397356122b717e0fcde1a373944b96-1 {}] regionserver.HStore(327): Store=4e397356122b717e0fcde1a373944b96/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:30:15,241 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,242 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,242 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,243 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,243 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,245 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,248 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:30:15,248 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4e397356122b717e0fcde1a373944b96; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=855292, jitterRate=0.08756031095981598}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T10:30:15,248 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:15,249 DEBUG 
[RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4e397356122b717e0fcde1a373944b96: Running coprocessor pre-open hook at 1732530615235Writing region info on filesystem at 1732530615235Initializing all the Stores at 1732530615236 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530615236Cleaning up temporary data from old regions at 1732530615243 (+7 ms)Running coprocessor post-open hooks at 1732530615248 (+5 ms)Region opened successfully at 1732530615249 (+1 ms) 2024-11-25T10:30:15,251 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96., pid=6, masterSystemTime=1732530615228 2024-11-25T10:30:15,253 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:15,254 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:15,254 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4e397356122b717e0fcde1a373944b96, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,35207,1732530614007 2024-11-25T10:30:15,255 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:15,257 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:15,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e397356122b717e0fcde1a373944b96, server=e2fd862922a0,35207,1732530614007 because future has completed 2024-11-25T10:30:15,258 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:15,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T10:30:15,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4e397356122b717e0fcde1a373944b96, server=e2fd862922a0,35207,1732530614007 in 184 msec 2024-11-25T10:30:15,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T10:30:15,265 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=4e397356122b717e0fcde1a373944b96, ASSIGN in 346 msec 2024-11-25T10:30:15,266 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T10:30:15,266 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530615266"}]},"ts":"1732530615266"} 2024-11-25T10:30:15,268 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-25T10:30:15,269 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T10:30:15,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 387 msec 2024-11-25T10:30:20,169 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:30:20,171 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:20,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:20,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:20,198 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:20,290 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-25T10:30:24,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T10:30:24,327 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T10:30:24,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T10:30:24,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-25T10:30:24,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:30:24,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T10:30:24,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42939 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:30:24,942 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-25T10:30:24,942 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-25T10:30:24,945 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T10:30:24,945 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:24,958 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:24,962 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:24,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:24,963 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:24,963 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:30:24,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@402062d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:24,964 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@219c70cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:25,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43e34915{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir/jetty-localhost-33471-hadoop-hdfs-3_4_1-tests_jar-_-any-14327772802116183563/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:25,079 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d7fa207{HTTP/1.1, (http/1.1)}{localhost:33471} 2024-11-25T10:30:25,079 INFO [Time-limited test {}] server.Server(415): Started @116592ms 2024-11-25T10:30:25,081 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:30:25,114 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:25,117 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:25,118 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:25,118 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:25,118 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:30:25,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5e4864{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:25,119 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fb99827{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:25,178 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data6/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:25,178 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data5/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:25,195 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:30:25,198 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ca0a81f53d604c8 with lease ID 0xaaa0c3a9855046d7: Processing first storage report for DS-c8e65e98-181d-4efb-96b1-3044c3a72886 from datanode DatanodeRegistration(127.0.0.1:41177, datanodeUuid=74310fe5-4036-4c76-b868-490e9f34781c, infoPort=44631, infoSecurePort=0, ipcPort=39045, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:25,198 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ca0a81f53d604c8 with lease ID 0xaaa0c3a9855046d7: from storage DS-c8e65e98-181d-4efb-96b1-3044c3a72886 node DatanodeRegistration(127.0.0.1:41177, datanodeUuid=74310fe5-4036-4c76-b868-490e9f34781c, infoPort=44631, infoSecurePort=0, ipcPort=39045, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:25,198 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ca0a81f53d604c8 with lease ID 0xaaa0c3a9855046d7: Processing first storage report for DS-c4aacdbe-8292-4f96-b33a-58619dd8f74f from datanode DatanodeRegistration(127.0.0.1:41177, datanodeUuid=74310fe5-4036-4c76-b868-490e9f34781c, infoPort=44631, infoSecurePort=0, ipcPort=39045, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:25,198 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ca0a81f53d604c8 with lease ID 0xaaa0c3a9855046d7: from storage DS-c4aacdbe-8292-4f96-b33a-58619dd8f74f node DatanodeRegistration(127.0.0.1:41177, datanodeUuid=74310fe5-4036-4c76-b868-490e9f34781c, infoPort=44631, infoSecurePort=0, ipcPort=39045, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:25,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@233be953{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir/jetty-localhost-46809-hadoop-hdfs-3_4_1-tests_jar-_-any-5265876270907806878/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:25,238 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71810790{HTTP/1.1, (http/1.1)}{localhost:46809} 2024-11-25T10:30:25,238 INFO [Time-limited test {}] server.Server(415): Started @116750ms 2024-11-25T10:30:25,239 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:30:25,270 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:25,274 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:25,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:25,275 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:25,275 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:30:25,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25509568{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:25,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41559526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:25,332 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data7/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:25,332 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data8/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:25,356 WARN [Thread-843 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:30:25,359 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d65c912451e92a6 with lease ID 0xaaa0c3a9855046d8: Processing first storage report for DS-dd6078ff-95e5-457f-866e-84f4811f1ac6 from datanode DatanodeRegistration(127.0.0.1:37725, datanodeUuid=e20cad83-53e7-409e-9ba6-a6b328f9d23c, infoPort=37905, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:25,359 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d65c912451e92a6 with lease ID 0xaaa0c3a9855046d8: from storage DS-dd6078ff-95e5-457f-866e-84f4811f1ac6 node DatanodeRegistration(127.0.0.1:37725, datanodeUuid=e20cad83-53e7-409e-9ba6-a6b328f9d23c, infoPort=37905, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:25,359 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1d65c912451e92a6 with lease ID 0xaaa0c3a9855046d8: Processing first storage report for DS-cee60b78-9e62-45f2-831b-eba095b4b8e1 from datanode DatanodeRegistration(127.0.0.1:37725, datanodeUuid=e20cad83-53e7-409e-9ba6-a6b328f9d23c, infoPort=37905, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:25,359 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1d65c912451e92a6 with lease ID 0xaaa0c3a9855046d8: from storage DS-cee60b78-9e62-45f2-831b-eba095b4b8e1 node DatanodeRegistration(127.0.0.1:37725, datanodeUuid=e20cad83-53e7-409e-9ba6-a6b328f9d23c, infoPort=37905, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:25,407 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@69de1683{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir/jetty-localhost-45529-hadoop-hdfs-3_4_1-tests_jar-_-any-14944991174059012391/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:25,407 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2801262{HTTP/1.1, (http/1.1)}{localhost:45529} 2024-11-25T10:30:25,408 INFO [Time-limited test {}] server.Server(415): Started @116920ms 2024-11-25T10:30:25,409 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
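The Jetty web-UI startups, BlockPoolSlice scans, and "first storage report" block reports above correspond to the test bringing additional datanodes (data5 through data10) online in its embedded HDFS cluster before the datanode-death scenario begins. A minimal sketch of that step, assuming Hadoop's MiniDFSCluster test utility; the datanode counts and configuration are illustrative, not taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class GrowMiniDfsCluster {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Start with a small pipeline of datanodes (illustrative count).
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .build();
        cluster.waitActive();
        // Add more datanodes so later block recovery has spare replicas to fall back on;
        // each new datanode registers and sends the "first storage report" seen above.
        cluster.startDataNodes(conf, 2, true, null, null);
        cluster.waitActive();
        cluster.shutdown();
      }
    }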
2024-11-25T10:30:25,496 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:25,496 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10/current/BP-1978960096-172.17.0.2-1732530613132/current, will proceed with Du for space computation calculation, 2024-11-25T10:30:25,512 WARN [Thread-878 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:30:25,515 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd58d3b27f8bbc91e with lease ID 0xaaa0c3a9855046d9: Processing first storage report for DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad from datanode DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:25,515 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd58d3b27f8bbc91e with lease ID 0xaaa0c3a9855046d9: from storage DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad node DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:25,515 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd58d3b27f8bbc91e with lease ID 0xaaa0c3a9855046d9: Processing first storage report for DS-b9fa8850-43ee-4ec6-a1c8-76845d68fd5b from datanode DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132) 2024-11-25T10:30:25,515 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd58d3b27f8bbc91e with lease ID 0xaaa0c3a9855046d9: from storage DS-b9fa8850-43ee-4ec6-a1c8-76845d68fd5b node DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:25,530 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,530 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,530 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,531 WARN [DataStreamer for file /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 block BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK], DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:25,531 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,531 WARN [DataStreamer for file /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 block BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 
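The ResponseProcessor exceptions and "Error Recovery ... datanode ... is bad" lines beginning here are the client-side symptom the test is after: one datanode in the open WAL write pipeline (127.0.0.1:45247) has been taken down while streams were still writing. A minimal sketch of that fault injection, assuming the test holds a reference to the embedded MiniDFSCluster; the datanode index is illustrative:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class DatanodeDeath {
      private DatanodeDeath() {}

      /** Stop one datanode that is currently serving an open write pipeline. */
      public static void kill(MiniDFSCluster cluster, int datanodeIndex) {
        // stopDataNode tears the in-process datanode down; writers holding a pipeline
        // through it then see Broken pipe / Premature EOF and begin pipeline recovery.
        cluster.stopDataNode(datanodeIndex);
      }
    }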
2024-11-25T10:30:25,531 WARN [DataStreamer for file /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta block BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:25,531 WARN [PacketResponder: BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45247] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,531 WARN [PacketResponder: BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45247] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,532 WARN [DataStreamer for file /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 block BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:25,531 WARN [PacketResponder: BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:45247] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
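Whether the DFS client reacts to such a failure by requesting a replacement datanode or by continuing on the shrunken pipeline, as the "Error Recovery ... in pipeline [...]" lines above show it doing, is governed by client-side configuration. A minimal sketch of those knobs using the standard HDFS client keys; the values are illustrative, not necessarily what this test run used:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConf {
      /** Client settings that decide how DFSOutputStream rebuilds a failed write pipeline. */
      public static Configuration walClientConf() {
        Configuration conf = new Configuration();
        // Try to swap in a replacement datanode instead of quietly writing with fewer replicas.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If no replacement can be found, keep writing rather than failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }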
2024-11-25T10:30:25,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:41882 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41882 dst: /127.0.0.1:35051 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1908829180_22 at /127.0.0.1:41904 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41904 dst: /127.0.0.1:35051 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:25,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:58900 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45247:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58900 dst: /127.0.0.1:45247 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1928152329_22 at /127.0.0.1:41854 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41854 dst: /127.0.0.1:35051 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1928152329_22 at /127.0.0.1:58860 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45247:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58860 dst: /127.0.0.1:45247 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1908829180_22 at /127.0.0.1:58954 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45247:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58954 dst: /127.0.0.1:45247 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:41876 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41876 dst: /127.0.0.1:35051 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:58916 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45247:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58916 dst: /127.0.0.1:45247 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,535 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@72e5785b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:25,535 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68370875{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:25,536 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:25,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@198c3788{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:25,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39a69c39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:25,538 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:25,538 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T10:30:25,538 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1978960096-172.17.0.2-1732530613132 (Datanode Uuid 2cbdce5a-1e08-4a2d-aea4-9761244b9c0d) service to localhost/127.0.0.1:46735 2024-11-25T10:30:25,538 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:25,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data3/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:25,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data4/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:25,539 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:25,540 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@5c6fb630 {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing unknown operation src: /127.0.0.1:60428 dst: /127.0.0.1:35051 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,541 WARN [DataStreamer for file /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 block BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,542 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1908829180_22 at /127.0.0.1:60430 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60430 dst: /127.0.0.1:35051 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,542 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1017 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1017 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,543 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:60422 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60422 dst: /127.0.0.1:35051 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:25,543 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741837_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:25,543 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741834_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,543 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:60426 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35051:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60426 dst: /127.0.0.1:35051 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:25,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@475f8022{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:25,545 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f8b6f27{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:25,545 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:25,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c3d2a60{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:25,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bb4f47b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:25,546 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:25,546 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1978960096-172.17.0.2-1732530613132 (Datanode Uuid 22f4e76f-dcf4-4d8b-86b7-f74f3d6f9055) service to localhost/127.0.0.1:46735 2024-11-25T10:30:25,546 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:30:25,546 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:25,547 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data1/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:25,547 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data2/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:25,547 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:25,551 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96., hostname=e2fd862922a0,35207,1732530614007, seqNum=2] 2024-11-25T10:30:25,552 ERROR [FSHLog-0-hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56-prefix:e2fd862922a0,35207,1732530614007 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,553 WARN [FSHLog-0-hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56-prefix:e2fd862922a0,35207,1732530614007 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:25,553 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,553 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C35207%2C1732530614007:(num 1732530614468) roll requested 2024-11-25T10:30:25,553 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.1732530625553 2024-11-25T10:30:25,559 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:25,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:25,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:25,559 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:25,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:25,560 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530625553 2024-11-25T10:30:25,560 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,560 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:25,560 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44631:44631),(127.0.0.1/127.0.0.1:37905:37905)] 2024-11-25T10:30:25,561 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:25,561 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-25T10:30:25,561 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-25T10:30:25,562 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 2024-11-25T10:30:25,564 WARN [IPC Server handler 2 on default port 46735 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1017 2024-11-25T10:30:25,567 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 after 4ms 2024-11-25T10:30:26,097 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:26,848 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:27,561 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:27,562 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530625553 2024-11-25T10:30:27,563 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:27,563 WARN [DataStreamer for file /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530625553 block BP-1978960096-172.17.0.2-1732530613132:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 2024-11-25T10:30:27,563 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:48308 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:41177:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48308 dst: /127.0.0.1:41177 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:27,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:54234 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54234 dst: /127.0.0.1:37725 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:27,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43e34915{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:27,565 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d7fa207{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:27,565 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:27,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@219c70cc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:27,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@402062d6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:27,567 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:27,567 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T10:30:27,567 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1978960096-172.17.0.2-1732530613132 (Datanode Uuid 74310fe5-4036-4c76-b868-490e9f34781c) service to localhost/127.0.0.1:46735 2024-11-25T10:30:27,567 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:27,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data5/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:27,567 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data6/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:27,568 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:28,097 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:28,848 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:29,561 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:29,562 WARN [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]] 2024-11-25T10:30:29,562 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C35207%2C1732530614007:(num 1732530625553) roll requested 2024-11-25T10:30:29,562 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.1732530629562 2024-11-25T10:30:29,565 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:29,565 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 2024-11-25T10:30:29,565 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741839_1021 2024-11-25T10:30:29,568 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 after 4006ms 2024-11-25T10:30:29,568 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:29,571 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:29,571 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 
2024-11-25T10:30:29,571 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741840_1022 2024-11-25T10:30:29,572 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:29,573 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T10:30:29,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:54252 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data8]'}, localName='127.0.0.1:37725', datanodeUuid='e20cad83-53e7-409e-9ba6-a6b328f9d23c', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741841_1023 to mirror 127.0.0.1:45247 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:29,574 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45247 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:29,574 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:54252 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 
2024-11-25T10:30:29,574 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:29,574 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741841_1023 2024-11-25T10:30:29,574 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:54252 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:37725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54252 dst: /127.0.0.1:37725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:29,575 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:29,579 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:29,579 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:29,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:29,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:29,579 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:29,580 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530625553 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530629562 2024-11-25T10:30:29,580 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37905:37905),(127.0.0.1/127.0.0.1:45423:45423)] 2024-11-25T10:30:29,581 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:29,581 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530625553 is not closed yet, will try archiving it next time 2024-11-25T10:30:29,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37725 is added to blk_1073741838_1020 (size=3600) 2024-11-25T10:30:29,982 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:30,098 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:30,849 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,576 WARN [ResponseProcessor for block BP-1978960096-172.17.0.2-1732530613132:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1978960096-172.17.0.2-1732530613132:blk_1073741842_1024 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,577 WARN [DataStreamer for file /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530629562 block BP-1978960096-172.17.0.2-1732530613132:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:31,577 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:54262 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54262 dst: /127.0.0.1:37725 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:31,577 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:37164 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37164 dst: /127.0.0.1:39809 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:31,579 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@233be953{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:31,579 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71810790{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:30:31,579 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:30:31,579 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fb99827{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:30:31,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5e4864{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,STOPPED} 2024-11-25T10:30:31,581 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,581 WARN [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]] 2024-11-25T10:30:31,581 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:30:31,581 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:30:31,581 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1978960096-172.17.0.2-1732530613132 (Datanode Uuid e20cad83-53e7-409e-9ba6-a6b328f9d23c) service to localhost/127.0.0.1:46735 2024-11-25T10:30:31,581 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C35207%2C1732530614007:(num 1732530629562) roll requested 2024-11-25T10:30:31,581 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:30:31,582 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data7/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:31,582 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.1732530631581 2024-11-25T10:30:31,582 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data8/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:30:31,582 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:30:31,585 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,585 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 
2024-11-25T10:30:31,585 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741843_1026 2024-11-25T10:30:31,585 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:31,586 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,587 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 2024-11-25T10:30:31,587 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741844_1027 2024-11-25T10:30:31,587 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:31,588 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:31,589 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:31,589 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741845_1028 2024-11-25T10:30:31,589 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35207 {}] regionserver.HRegion(8855): Flush requested on 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:31,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51242 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741846_1029 to mirror 127.0.0.1:37725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:31,592 WARN [Thread-929 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37725 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,592 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51242 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T10:30:31,592 WARN [Thread-929 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:31,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4e397356122b717e0fcde1a373944b96 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:30:31,592 WARN [Thread-929 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741846_1029 2024-11-25T10:30:31,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51242 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51242 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:31,592 WARN [Thread-929 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:31,593 WARN [IPC Server handler 4 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T10:30:31,593 WARN [IPC Server handler 4 on default port 46735 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T10:30:31,593 WARN [IPC Server handler 4 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T10:30:31,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:31,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:31,598 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:31,598 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:31,598 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:31,598 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530629562 with entries=7, filesize=7.25 KB; new WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530631581 2024-11-25T10:30:31,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741842_1025 (size=7430) 2024-11-25T10:30:31,600 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45423:45423)] 2024-11-25T10:30:31,601 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:31,601 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530629562 is not closed yet, will try archiving it next time 2024-11-25T10:30:31,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/f39dc1c6651440a2b1629ff0d760de73 is 1080, key is row0002/info:/1732530627569/Put/seqid=0 2024-11-25T10:30:31,614 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,615 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:31,615 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741848_1031 2024-11-25T10:30:31,615 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:31,617 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,617 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 
2024-11-25T10:30:31,617 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741849_1032 2024-11-25T10:30:31,618 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:31,619 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:31,619 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 2024-11-25T10:30:31,619 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741850_1033 2024-11-25T10:30:31,620 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:31,621 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:31,621 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 2024-11-25T10:30:31,621 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741851_1034 2024-11-25T10:30:31,621 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:31,622 WARN [IPC Server handler 4 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T10:30:31,622 WARN [IPC Server handler 4 on default port 46735 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T10:30:31,622 WARN [IPC Server handler 4 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T10:30:31,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741852_1035 (size=10347) 2024-11-25T10:30:32,001 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:32,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/f39dc1c6651440a2b1629ff0d760de73 2024-11-25T10:30:32,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/f39dc1c6651440a2b1629ff0d760de73 as 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/f39dc1c6651440a2b1629ff0d760de73 2024-11-25T10:30:32,039 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/f39dc1c6651440a2b1629ff0d760de73, entries=5, sequenceid=11, filesize=10.1 K 2024-11-25T10:30:32,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 4e397356122b717e0fcde1a373944b96 in 448ms, sequenceid=11, compaction requested=false 2024-11-25T10:30:32,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4e397356122b717e0fcde1a373944b96: 2024-11-25T10:30:32,098 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:32,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35207 {}] regionserver.HRegion(8855): Flush requested on 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:32,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4e397356122b717e0fcde1a373944b96 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-25T10:30:32,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/c319784c4471499680174f983d87ea27 is 1080, key is row0007/info:/1732530631593/Put/seqid=0 2024-11-25T10:30:32,223 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37725 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:32,223 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51276 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741853_1036 to mirror 127.0.0.1:37725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:32,223 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:32,223 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741853_1036 2024-11-25T10:30:32,223 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51276 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T10:30:32,223 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51276 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51276 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:32,223 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:32,225 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:32,225 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 2024-11-25T10:30:32,225 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741854_1037 2024-11-25T10:30:32,225 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:32,227 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41177 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:32,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51290 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741855_1038 to mirror 127.0.0.1:41177 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:32,227 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 2024-11-25T10:30:32,227 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741855_1038 2024-11-25T10:30:32,227 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51290 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T10:30:32,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51290 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51290 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:32,228 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:32,230 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45247 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:32,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51294 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741856_1039 to mirror 127.0.0.1:45247 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:32,230 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:32,230 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741856_1039 2024-11-25T10:30:32,230 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51294 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T10:30:32,230 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51294 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51294 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:32,230 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:32,231 WARN [IPC Server handler 1 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T10:30:32,231 WARN [IPC Server handler 1 on default port 46735 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T10:30:32,231 WARN [IPC Server handler 1 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T10:30:32,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741857_1040 (size=12506) 2024-11-25T10:30:32,634 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/c319784c4471499680174f983d87ea27 2024-11-25T10:30:32,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/c319784c4471499680174f983d87ea27 as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27 2024-11-25T10:30:32,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27, entries=7, sequenceid=24, filesize=12.2 K 2024-11-25T10:30:32,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 4e397356122b717e0fcde1a373944b96 in 433ms, sequenceid=24, compaction requested=false 2024-11-25T10:30:32,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4e397356122b717e0fcde1a373944b96: 2024-11-25T10:30:32,649 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-25T10:30:32,649 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:32,649 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27 because midkey is the same as first or last row 2024-11-25T10:30:32,849 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,601 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,601 WARN [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]] 2024-11-25T10:30:33,601 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C35207%2C1732530614007:(num 1732530631581) roll requested 2024-11-25T10:30:33,602 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.1732530633601 2024-11-25T10:30:33,604 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,604 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 2024-11-25T10:30:33,604 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741858_1041 2024-11-25T10:30:33,605 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:33,607 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37725 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,607 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51308 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741859_1042 to mirror 127.0.0.1:37725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:33,607 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:33,607 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741859_1042 2024-11-25T10:30:33,608 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51308 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T10:30:33,608 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51308 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51308 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:33,608 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:33,609 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,610 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 2024-11-25T10:30:33,610 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741860_1043 2024-11-25T10:30:33,610 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:33,612 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,612 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 
2024-11-25T10:30:33,612 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741861_1044 2024-11-25T10:30:33,612 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:33,613 WARN [IPC Server handler 2 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T10:30:33,613 WARN [IPC Server handler 2 on default port 46735 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T10:30:33,613 WARN [IPC Server handler 2 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T10:30:33,616 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:33,616 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:33,616 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:33,616 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:33,616 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:33,616 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530631581 with entries=17, filesize=17.07 KB; new WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530633601 2024-11-25T10:30:33,617 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45423:45423)] 2024-11-25T10:30:33,617 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:33,617 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530631581 is not closed yet, will try archiving it next time 2024-11-25T10:30:33,617 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530625553 to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs/e2fd862922a0%2C35207%2C1732530614007.1732530625553 2024-11-25T10:30:33,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741847_1030 (size=17486) 2024-11-25T10:30:33,619 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530629562 to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs/e2fd862922a0%2C35207%2C1732530614007.1732530629562 2024-11-25T10:30:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35207 {}] regionserver.HRegion(8855): Flush requested on 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:33,634 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4e397356122b717e0fcde1a373944b96 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T10:30:33,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/10da2ed839b64066bc766ca43f55f48c is 1079, key is tmprow/info:/1732530633633/Put/seqid=0 2024-11-25T10:30:33,640 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45247 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,640 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51322 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741863_1046 to mirror 127.0.0.1:45247 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:33,641 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:33,641 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741863_1046 2024-11-25T10:30:33,641 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51322 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T10:30:33,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51322 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51322 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:33,641 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:33,642 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,643 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:33,643 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741864_1047 2024-11-25T10:30:33,643 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:33,644 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,644 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK], DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 
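The pipeline failures above all follow the same sequence: the client opens a block output stream, either the first datanode cannot reach its mirror or the client cannot reach the first datanode at all, the block is abandoned, and the unreachable node goes onto an exclude list before the next allocation is attempted. The sketch below imitates that exclude-and-retry pattern with plain sockets; it is an illustration only, not Hadoop's DataStreamer implementation. The ports are the ones in the log (39809 is the surviving datanode); the class and variable names are invented.

```java
// Illustration of the abandon/exclude/retry pattern seen in the log above;
// this is NOT org.apache.hadoop.hdfs.DataStreamer, just the same idea with
// plain sockets. Ports come from the log, everything else is hypothetical.
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.ArrayList;
import java.util.List;

public class PipelineRetrySketch {
    public static void main(String[] args) {
        List<InetSocketAddress> candidates = List.of(
                new InetSocketAddress("127.0.0.1", 39809),   // still reachable
                new InetSocketAddress("127.0.0.1", 45247),   // refused in the log
                new InetSocketAddress("127.0.0.1", 37725),   // refused in the log
                new InetSocketAddress("127.0.0.1", 41177),   // refused in the log
                new InetSocketAddress("127.0.0.1", 35051));  // refused in the log
        List<InetSocketAddress> pipeline = new ArrayList<>();
        List<InetSocketAddress> excluded = new ArrayList<>();

        for (InetSocketAddress dn : candidates) {
            try (Socket s = new Socket()) {
                s.connect(dn, 1000);   // a refusal here maps to "Exception in createBlockOutputStream"
                pipeline.add(dn);
            } catch (IOException e) {
                excluded.add(dn);      // maps to "Abandoning blk_..." / "Excluding datanode ..."
            }
        }
        System.out.println("pipeline=" + pipeline + ", excluded=" + excluded);
    }
}
```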
2024-11-25T10:30:33,644 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741865_1048 2024-11-25T10:30:33,645 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:33,646 WARN [Thread-951 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:33,646 WARN [Thread-951 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 
2024-11-25T10:30:33,646 WARN [Thread-951 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741866_1049 2024-11-25T10:30:33,646 WARN [Thread-951 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:33,647 WARN [IPC Server handler 0 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T10:30:33,647 WARN [IPC Server handler 0 on default port 46735 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T10:30:33,647 WARN [IPC Server handler 0 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T10:30:33,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741867_1050 (size=6027) 2024-11-25T10:30:34,019 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:34,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/10da2ed839b64066bc766ca43f55f48c 2024-11-25T10:30:34,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/10da2ed839b64066bc766ca43f55f48c as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/10da2ed839b64066bc766ca43f55f48c 2024-11-25T10:30:34,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/10da2ed839b64066bc766ca43f55f48c, entries=1, sequenceid=34, filesize=5.9 K 2024-11-25T10:30:34,064 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4e397356122b717e0fcde1a373944b96 in 431ms, sequenceid=34, compaction requested=true 2024-11-25T10:30:34,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4e397356122b717e0fcde1a373944b96: 2024-11-25T10:30:34,064 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-25T10:30:34,064 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:34,064 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27 because midkey is the same as first or last row 2024-11-25T10:30:34,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4e397356122b717e0fcde1a373944b96:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:30:34,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:30:34,065 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:30:34,066 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:30:34,066 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HStore(1541): 4e397356122b717e0fcde1a373944b96/info is initiating minor compaction (all files) 2024-11-25T10:30:34,066 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4e397356122b717e0fcde1a373944b96/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 
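The flush and compaction bookkeeping just above is internally consistent: the ExploringCompactionPolicy picks three store files totalling 28880 bytes, which is the 28.2 K "sumSize" that the split policy then compares against its 16.0 K "sizeToCheck", yet StoreUtils still refuses to split because the midkey equals the first or last row. A quick arithmetic check of those reported figures, assuming nothing beyond the numbers printed in the log:

```java
// Arithmetic check of the figures reported above; only the 28880-byte total
// and the 16.0 K threshold are taken from the log, nothing else is assumed.
public class CompactionSizeCheck {
    public static void main(String[] args) {
        long totalSelected = 28_880;                     // "selected 3 files of size 28880"
        System.out.printf("sumSize=%.1f K%n", totalSelected / 1024.0); // ~28.2 K

        long sizeToCheck = 16 * 1024;                    // "sizeToCheck=16.0 K"
        System.out.println("region size big enough to split? "
                + (totalSelected > sizeToCheck));        // true, matching the log
    }
}
```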
2024-11-25T10:30:34,066 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/f39dc1c6651440a2b1629ff0d760de73, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/10da2ed839b64066bc766ca43f55f48c] into tmpdir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp, totalSize=28.2 K 2024-11-25T10:30:34,067 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.Compactor(225): Compacting f39dc1c6651440a2b1629ff0d760de73, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732530627569 2024-11-25T10:30:34,067 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.Compactor(225): Compacting c319784c4471499680174f983d87ea27, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732530631593 2024-11-25T10:30:34,067 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 10da2ed839b64066bc766ca43f55f48c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732530633633 2024-11-25T10:30:34,080 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4e397356122b717e0fcde1a373944b96#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:30:34,080 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/e9a303244de34ccd9725b5811b3b8bbd is 1080, key is row0002/info:/1732530627569/Put/seqid=0 2024-11-25T10:30:34,082 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51354 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741868_1051 to mirror 127.0.0.1:41177 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:34,083 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41177 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:34,083 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51354 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T10:30:34,083 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 2024-11-25T10:30:34,083 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51354 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51354 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:34,083 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741868_1051 2024-11-25T10:30:34,084 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:34,085 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:34,085 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:34,085 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741869_1052 2024-11-25T10:30:34,085 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:34,087 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
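For context on what is generating all of this I/O: the HFileWriterImpl lines above show single ~1 KB cells being written under keys such as tmprow and row0002 in the info family of TestLogRolling-testLogRollOnDatanodeDeath, and each flush persists those puts into a new store file. A minimal client-side sketch of that kind of traffic is shown below; it uses the standard HBase client API but is an assumed stand-in for the test's own write loop, and the row/value layout is a guess based on the logged cell sizes.

```java
// Minimal sketch of the write traffic implied by the log (not the test's code).
// Table and column-family names are from the log; the row/value layout is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutTrafficSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(
                 TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
            byte[] family = Bytes.toBytes("info");       // column family seen in the log
            byte[] value = new byte[1024];               // ~1 KB values, like the ~1079-byte cells
            for (int i = 2; i < 12; i++) {
                Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
                put.addColumn(family, Bytes.toBytes(""), value);
                table.put(put);                          // accumulates in the memstore until a flush
            }
        }
    }
}
```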
2024-11-25T10:30:34,087 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 2024-11-25T10:30:34,087 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741870_1053 2024-11-25T10:30:34,087 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:34,088 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:34,088 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 
2024-11-25T10:30:34,088 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741871_1054 2024-11-25T10:30:34,089 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:34,089 WARN [IPC Server handler 3 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T10:30:34,089 WARN [IPC Server handler 3 on default port 46735 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T10:30:34,089 WARN [IPC Server handler 3 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T10:30:34,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741872_1055 (size=17994) 2024-11-25T10:30:34,098 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
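The BlockPlacementPolicyDefault warnings in this stretch mean the NameNode could find only one usable DISK target for a block that asks for two replicas, because every other datanode is unreachable; the later addStoredBlock lines confirm the block landing only on 127.0.0.1:39809. A toy version of that shortfall calculation, using the standard dfs.replication key and a live-node count read off the log rather than queried from a cluster:

```java
// Toy version of the replica-shortfall arithmetic behind the warnings above.
// dfs.replication is a standard HDFS key; the live-node count is taken from the log.
import org.apache.hadoop.conf.Configuration;

public class ReplicaShortfallSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("dfs.replication", 2);             // two replicas requested per block
        int requested = conf.getInt("dfs.replication", 3);
        int liveDiskNodes = 1;                         // only 127.0.0.1:39809 answers in this section
        int placed = Math.min(requested, liveDiskNodes);
        System.out.println("placed=" + placed + ", still in need of "
                + (requested - placed) + " to reach " + requested);
    }
}
```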
2024-11-25T10:30:34,500 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/e9a303244de34ccd9725b5811b3b8bbd as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd 2024-11-25T10:30:34,507 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4e397356122b717e0fcde1a373944b96/info of 4e397356122b717e0fcde1a373944b96 into e9a303244de34ccd9725b5811b3b8bbd(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4e397356122b717e0fcde1a373944b96: 2024-11-25T10:30:34,507 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96., storeName=4e397356122b717e0fcde1a373944b96/info, priority=13, startTime=1732530634064; duration=0sec 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd because midkey is the same as first or last row 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd because midkey is the same as first or last row 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:34,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd because midkey is the same as first or last row 2024-11-25T10:30:34,508 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:30:34,508 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4e397356122b717e0fcde1a373944b96:info 2024-11-25T10:30:34,527 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67feaf7b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741842_1025 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:34,527 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6d01986a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741852_1035 to 127.0.0.1:41177 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:34,849 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. 
Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:35,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35207 {}] regionserver.HRegion(8855): Flush requested on 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:35,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4e397356122b717e0fcde1a373944b96 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T10:30:35,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/8185f2d89cfa46db95edf8bf008fd9ab is 1079, key is tmprow/info:/1732530635050/Put/seqid=0 2024-11-25T10:30:35,058 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45247 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:35,058 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51382 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741873_1056 to mirror 127.0.0.1:45247 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:35,058 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]) is bad. 2024-11-25T10:30:35,059 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741873_1056 2024-11-25T10:30:35,059 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51382 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-25T10:30:35,059 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51382 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51382 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:35,059 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45247,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK] 2024-11-25T10:30:35,060 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:35,060 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 2024-11-25T10:30:35,060 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741874_1057 2024-11-25T10:30:35,061 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:35,062 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:35,062 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]) is bad. 2024-11-25T10:30:35,062 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741875_1058 2024-11-25T10:30:35,062 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK] 2024-11-25T10:30:35,064 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37725 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:35,064 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51384 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741876_1059] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741876_1059 to mirror 127.0.0.1:37725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:35,064 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:35,064 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741876_1059 2024-11-25T10:30:35,065 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51384 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741876_1059] {}] datanode.BlockReceiver(316): Block 1073741876 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
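The BlockReceiver(316) messages report releasing 134217728 reserved bytes whenever one of these aborted blocks is closed. That figure is simply the default HDFS block size: the datanode reserves a full block's worth of space for an in-flight write and gives it back when the stream closes early, as the one-liner below confirms.

```java
// Sanity check on the reserved-bytes figure logged by BlockReceiver above.
public class ReservedBytesCheck {
    public static void main(String[] args) {
        long released = 134_217_728L;   // "Releasing 134217728 bytes as part of close"
        System.out.println(released / (1024 * 1024) + " MiB");   // 128 MiB, the default dfs.blocksize
    }
}
```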
2024-11-25T10:30:35,065 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:51384 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741876_1059] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51384 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:35,065 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:35,065 WARN [IPC Server handler 4 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-25T10:30:35,065 WARN [IPC Server handler 4 on default port 46735 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-25T10:30:35,066 WARN [IPC Server handler 4 on default port 46735 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-25T10:30:35,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741877_1060 (size=6027) 2024-11-25T10:30:35,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/8185f2d89cfa46db95edf8bf008fd9ab 2024-11-25T10:30:35,476 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/8185f2d89cfa46db95edf8bf008fd9ab as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8185f2d89cfa46db95edf8bf008fd9ab 2024-11-25T10:30:35,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8185f2d89cfa46db95edf8bf008fd9ab, entries=1, sequenceid=45, filesize=5.9 K 2024-11-25T10:30:35,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 4e397356122b717e0fcde1a373944b96 in 431ms, sequenceid=45, compaction requested=false 2024-11-25T10:30:35,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4e397356122b717e0fcde1a373944b96: 2024-11-25T10:30:35,482 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-25T10:30:35,482 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:35,482 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd because midkey is the same as first or last row 2024-11-25T10:30:35,516 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6d01986a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741847_1030 to 127.0.0.1:35051 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
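The flush summary above reports its sizes both in raw bytes and in rounded kilobytes (dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize ~2.10 KB/2150). The conversion is a straight divide-by-1024, reproduced below purely as a check on the logged figures:

```java
// Reproduces the byte-to-KB rounding in the flush summary above.
public class FlushSizeCheck {
    public static void main(String[] args) {
        long[] bytes  = {7525, 8304, 2150};
        String[] name = {"dataSize", "heapSize", "currentSize"};
        for (int i = 0; i < bytes.length; i++) {
            System.out.printf("%s ~%.2f KB/%d%n", name[i], bytes[i] / 1024.0, bytes[i]);
        }
    }
}
```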
2024-11-25T10:30:35,516 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67feaf7b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741857_1040 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:35,618 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:35,618 WARN [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-25T10:30:35,665 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:30:35,668 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:30:35,669 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:30:35,669 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:30:35,669 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:30:35,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@616b5398{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:30:35,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a48a14c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:30:35,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6869cf12{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/java.io.tmpdir/jetty-localhost-43313-hadoop-hdfs-3_4_1-tests_jar-_-any-3839456725138608500/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:30:35,785 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17a19f67{HTTP/1.1, (http/1.1)}{localhost:43313} 2024-11-25T10:30:35,785 INFO [Time-limited test {}] server.Server(415): Started @127297ms 2024-11-25T10:30:35,786 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:30:35,877 WARN [Thread-987 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:30:35,881 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ef415bd6aac018b with lease ID 0xaaa0c3a9855046da: from storage DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80 node DatanodeRegistration(127.0.0.1:46773, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=42535, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:30:35,881 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3ef415bd6aac018b with lease ID 0xaaa0c3a9855046da: from storage DS-1d3585b4-60ef-42d6-97bf-07ada3b99f50 node DatanodeRegistration(127.0.0.1:46773, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=42535, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:30:36,098 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:36,850 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:37,516 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6d01986a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741872_1055 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:37,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741867_1050 (size=6027) 2024-11-25T10:30:37,618 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:38,099 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:38,516 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@67feaf7b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39809, datanodeUuid=2c123028-200c-43b3-8f7b-64b4bc0bd9b4, infoPort=45423, infoSecurePort=0, ipcPort=45681, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741877_1060 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:38,850 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:39,618 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:40,099 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:40,850 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:41,619 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:42,099 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:42,851 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:43,619 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:43,937 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-25T10:30:44,100 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:44,248 ERROR [FSHLog-0-hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData-prefix:e2fd862922a0,42939,1732530613957 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:44,248 WARN [FSHLog-0-hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData-prefix:e2fd862922a0,42939,1732530613957 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:44,248 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C42939%2C1732530613957:(num 1732530614122) roll requested 2024-11-25T10:30:44,248 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C42939%2C1732530613957.1732530644248 2024-11-25T10:30:44,251 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:44,251 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK], DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]) is bad. 2024-11-25T10:30:44,251 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741878_1061 2024-11-25T10:30:44,252 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK] 2024-11-25T10:30:44,253 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:44,253 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:41177,DS-c8e65e98-181d-4efb-96b1-3044c3a72886,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 
2024-11-25T10:30:44,253 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741879_1062 2024-11-25T10:30:44,254 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:44,258 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:44,258 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:44,258 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:44,258 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:44,258 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:44,259 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530644248 2024-11-25T10:30:44,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:44,259 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:44,259 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 2024-11-25T10:30:44,259 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42535:42535),(127.0.0.1/127.0.0.1:45423:45423)] 2024-11-25T10:30:44,259 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 is not closed yet, will try archiving it next time 2024-11-25T10:30:44,259 WARN [IPC Server handler 2 on default port 46735 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-11-25T10:30:44,260 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 after 1ms 2024-11-25T10:30:44,851 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:45,619 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:46,851 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:47,620 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:48,261 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/WALs/e2fd862922a0,42939,1732530613957/e2fd862922a0%2C42939%2C1732530613957.1732530614122 after 4002ms 2024-11-25T10:30:48,852 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:49,620 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:49,881 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3bcdf559[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46773, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=42535, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741836_1012 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:49,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:30:50,852 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:50,880 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3bcdf559[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46773, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=42535, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741826_1002 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:50,880 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@13cc8de0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46773, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=42535, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741828_1004 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:30:51,418 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.1732530651418 2024-11-25T10:30:51,421 WARN [Thread-1018 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,421 WARN [Thread-1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 
2024-11-25T10:30:51,421 WARN [Thread-1018 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741881_1065 2024-11-25T10:30:51,422 WARN [Thread-1018 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:51,426 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,426 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,426 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,426 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,426 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,426 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530633601 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530651418 2024-11-25T10:30:51,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45423:45423),(127.0.0.1/127.0.0.1:42535:42535)] 2024-11-25T10:30:51,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:51,427 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530633601 is not closed yet, will try archiving it next time 2024-11-25T10:30:51,427 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530631581 to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs/e2fd862922a0%2C35207%2C1732530614007.1732530631581 2024-11-25T10:30:51,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741862_1045 (size=13591) 2024-11-25T10:30:51,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35207 {}] regionserver.HRegion(8855): Flush requested on 4e397356122b717e0fcde1a373944b96 2024-11-25T10:30:51,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 4e397356122b717e0fcde1a373944b96 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-25T10:30:51,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/8fcd1163233c40d587714b864ecf348e is 1080, key is row0013/info:/1732530651429/Put/seqid=0 2024-11-25T10:30:51,445 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,445 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741883_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:51,445 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741883_1067 2024-11-25T10:30:51,446 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:51,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741884_1068 (size=11421) 2024-11-25T10:30:51,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741884_1068 (size=11421) 2024-11-25T10:30:51,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/8fcd1163233c40d587714b864ecf348e 2024-11-25T10:30:51,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/8fcd1163233c40d587714b864ecf348e as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8fcd1163233c40d587714b864ecf348e 2024-11-25T10:30:51,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8fcd1163233c40d587714b864ecf348e, entries=6, sequenceid=55, filesize=11.2 K 2024-11-25T10:30:51,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 4e397356122b717e0fcde1a373944b96 in 28ms, sequenceid=55, compaction requested=true 2024-11-25T10:30:51,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 4e397356122b717e0fcde1a373944b96: 2024-11-25T10:30:51,467 DEBUG 
[MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-11-25T10:30:51,467 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:51,467 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd because midkey is the same as first or last row 2024-11-25T10:30:51,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 4e397356122b717e0fcde1a373944b96:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:30:51,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:30:51,467 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:30:51,468 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:30:51,468 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HStore(1541): 4e397356122b717e0fcde1a373944b96/info is initiating minor compaction (all files) 2024-11-25T10:30:51,468 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4e397356122b717e0fcde1a373944b96/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 
2024-11-25T10:30:51,469 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8185f2d89cfa46db95edf8bf008fd9ab, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8fcd1163233c40d587714b864ecf348e] into tmpdir=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp, totalSize=34.6 K 2024-11-25T10:30:51,469 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.Compactor(225): Compacting e9a303244de34ccd9725b5811b3b8bbd, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732530627569 2024-11-25T10:30:51,469 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8185f2d89cfa46db95edf8bf008fd9ab, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732530635050 2024-11-25T10:30:51,470 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8fcd1163233c40d587714b864ecf348e, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732530635455 2024-11-25T10:30:51,487 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4e397356122b717e0fcde1a373944b96#info#compaction#24 average throughput is 8.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:30:51,488 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/67ceb4adbeac4fd29fc12dd55f65743d is 1080, key is row0002/info:/1732530627569/Put/seqid=0 2024-11-25T10:30:51,489 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:51,490 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:51,490 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741885_1069 2024-11-25T10:30:51,490 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:51,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741886_1070 (size=23502) 2024-11-25T10:30:51,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741886_1070 (size=23502) 2024-11-25T10:30:51,500 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/67ceb4adbeac4fd29fc12dd55f65743d as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/67ceb4adbeac4fd29fc12dd55f65743d 2024-11-25T10:30:51,507 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 4e397356122b717e0fcde1a373944b96/info of 4e397356122b717e0fcde1a373944b96 into 67ceb4adbeac4fd29fc12dd55f65743d(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4e397356122b717e0fcde1a373944b96: 2024-11-25T10:30:51,507 INFO [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96., storeName=4e397356122b717e0fcde1a373944b96/info, priority=13, startTime=1732530651467; duration=0sec 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/67ceb4adbeac4fd29fc12dd55f65743d because midkey is the same as first or last row 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/67ceb4adbeac4fd29fc12dd55f65743d because midkey is the same as first or last row 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/67ceb4adbeac4fd29fc12dd55f65743d because midkey is the same as first or last row 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:30:51,507 DEBUG [RS:0;e2fd862922a0:35207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4e397356122b717e0fcde1a373944b96:info 2024-11-25T10:30:51,621 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,621 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-25T10:30:51,651 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T10:30:51,651 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T10:30:51,651 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:51,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:51,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:51,651 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T10:30:51,651 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T10:30:51,652 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1403695941, stopped=false 2024-11-25T10:30:51,652 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2fd862922a0,42939,1732530613957 2024-11-25T10:30:51,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:51,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:51,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:30:51,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:51,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:51,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:30:51,654 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:30:51,654 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T10:30:51,654 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:51,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:51,654 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-25T10:30:51,654 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,35207,1732530614007' *****
2024-11-25T10:30:51,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-25T10:30:51,654 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-25T10:30:51,654 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,35123,1732530614788' *****
2024-11-25T10:30:51,654 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-11-25T10:30:51,655 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-25T10:30:51,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-25T10:30:51,655 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-25T10:30:51,655 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-25T10:30:51,655 INFO [RS:0;e2fd862922a0:35207 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-25T10:30:51,655 INFO [RS:0;e2fd862922a0:35207 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-25T10:30:51,655 INFO [RS:1;e2fd862922a0:35123 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-11-25T10:30:51,655 INFO [RS:1;e2fd862922a0:35123 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-11-25T10:30:51,655 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(3091): Received CLOSE for 4e397356122b717e0fcde1a373944b96
2024-11-25T10:30:51,655 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,35123,1732530614788
2024-11-25T10:30:51,655 INFO [RS:1;e2fd862922a0:35123 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-25T10:30:51,655 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-25T10:30:51,655 INFO [RS:1;e2fd862922a0:35123 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e2fd862922a0:35123.
2024-11-25T10:30:51,656 DEBUG [RS:1;e2fd862922a0:35123 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:51,656 DEBUG [RS:1;e2fd862922a0:35123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,35207,1732530614007 2024-11-25T10:30:51,656 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,35123,1732530614788; all regions closed. 2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2fd862922a0:35207. 
2024-11-25T10:30:51,656 DEBUG [RS:0;e2fd862922a0:35207 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:30:51,656 DEBUG [RS:0;e2fd862922a0:35207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:51,656 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4e397356122b717e0fcde1a373944b96, disabling compactions & flushes 2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T10:30:51,656 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:30:51,656 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T10:30:51,656 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. after waiting 0 ms 2024-11-25T10:30:51,656 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 
2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-25T10:30:51,656 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:30:51,656 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:30:51,656 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 4e397356122b717e0fcde1a373944b96 1/1 column families, dataSize=6.30 KB heapSize=7 KB
2024-11-25T10:30:51,656 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:30:51,656 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-11-25T10:30:51,656 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4e397356122b717e0fcde1a373944b96=TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.}
2024-11-25T10:30:51,657 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:30:51,657 DEBUG [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4e397356122b717e0fcde1a373944b96
2024-11-25T10:30:51,657 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-25T10:30:51,657 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:30:51,657 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-25T10:30:51,657 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-25T10:30:51,657 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-25T10:30:51,657 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-25T10:30:51,657 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB
2024-11-25T10:30:51,657 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting...
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-25T10:30:51,657 ERROR [FSHLog-0-hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56-prefix:e2fd862922a0,35207,1732530614007.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,657 WARN [FSHLog-0-hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56-prefix:e2fd862922a0,35207,1732530614007.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,657 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,657 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 2024-11-25T10:30:51,658 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C35207%2C1732530614007.meta:.meta(num 1732530614659) roll requested 2024-11-25T10:30:51,658 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35207%2C1732530614007.meta.1732530651658.meta 2024-11-25T10:30:51,658 WARN [IPC Server handler 1 on default port 46735 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 has not been closed. Lease recovery is in progress. 
RecoveryId = 1071 for block blk_1073741837_1015 2024-11-25T10:30:51,658 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 after 1ms 2024-11-25T10:30:51,661 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/088aff2db80345a88c2c45e7daead631 is 1080, key is row0018/info:/1732530651439/Put/seqid=0 2024-11-25T10:30:51,661 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37725 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:56182 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741887_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10]'}, localName='127.0.0.1:39809', datanodeUuid='2c123028-200c-43b3-8f7b-64b4bc0bd9b4', xmitsInProgress=0}:Exception transferring block BP-1978960096-172.17.0.2-1732530613132:blk_1073741887_1072 to mirror 127.0.0.1:37725 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:51,662 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741887_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK], DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 2024-11-25T10:30:51,662 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:56182 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741887_1072] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-25T10:30:51,662 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741887_1072 2024-11-25T10:30:51,662 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-851367699_22 at /127.0.0.1:56182 [Receiving block BP-1978960096-172.17.0.2-1732530613132:blk_1073741887_1072] {}] datanode.DataXceiver(331): 127.0.0.1:39809:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56182 dst: /127.0.0.1:39809 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:51,662 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:51,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741888_1073 (size=11421) 2024-11-25T10:30:51,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741888_1073 (size=11421) 2024-11-25T10:30:51,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,668 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,668 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530651658.meta 2024-11-25T10:30:51,668 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,668 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35051,DS-9b717984-592f-4576-850b-6ea2f4aa1752,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:30:51,668 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta 2024-11-25T10:30:51,669 WARN [IPC Server handler 2 on default port 46735 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta has not been closed. Lease recovery is in progress. RecoveryId = 1075 for block blk_1073741834_1016 2024-11-25T10:30:51,669 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta after 1ms 2024-11-25T10:30:51,669 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=65 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/088aff2db80345a88c2c45e7daead631 2024-11-25T10:30:51,675 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/.tmp/info/088aff2db80345a88c2c45e7daead631 as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/088aff2db80345a88c2c45e7daead631 2024-11-25T10:30:51,675 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45423:45423),(127.0.0.1/127.0.0.1:42535:42535)] 2024-11-25T10:30:51,676 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta is not closed yet, will try archiving it next time 2024-11-25T10:30:51,681 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/088aff2db80345a88c2c45e7daead631, entries=6, sequenceid=65, filesize=11.2 K 2024-11-25T10:30:51,682 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 4e397356122b717e0fcde1a373944b96 in 26ms, sequenceid=65, compaction requested=false 2024-11-25T10:30:51,683 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/f39dc1c6651440a2b1629ff0d760de73, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/10da2ed839b64066bc766ca43f55f48c, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8185f2d89cfa46db95edf8bf008fd9ab, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8fcd1163233c40d587714b864ecf348e] to archive 2024-11-25T10:30:51,684 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T10:30:51,685 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/f39dc1c6651440a2b1629ff0d760de73 to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/f39dc1c6651440a2b1629ff0d760de73 2024-11-25T10:30:51,687 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27 to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/c319784c4471499680174f983d87ea27 2024-11-25T10:30:51,688 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/e9a303244de34ccd9725b5811b3b8bbd 2024-11-25T10:30:51,689 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/10da2ed839b64066bc766ca43f55f48c to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/10da2ed839b64066bc766ca43f55f48c 2024-11-25T10:30:51,690 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8185f2d89cfa46db95edf8bf008fd9ab to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8185f2d89cfa46db95edf8bf008fd9ab 2024-11-25T10:30:51,691 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8fcd1163233c40d587714b864ecf348e to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/info/8fcd1163233c40d587714b864ecf348e 2024-11-25T10:30:51,692 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e2fd862922a0:42939 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-25T10:30:51,692 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [f39dc1c6651440a2b1629ff0d760de73=10347, c319784c4471499680174f983d87ea27=12506, e9a303244de34ccd9725b5811b3b8bbd=17994, 10da2ed839b64066bc766ca43f55f48c=6027, 8185f2d89cfa46db95edf8bf008fd9ab=6027, 8fcd1163233c40d587714b864ecf348e=11421] 2024-11-25T10:30:51,692 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/info/6a432b304d55456b8b233de92d7d0065 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96./info:regioninfo/1732530615254/Put/seqid=0 2024-11-25T10:30:51,697 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/default/TestLogRolling-testLogRollOnDatanodeDeath/4e397356122b717e0fcde1a373944b96/recovered.edits/68.seqid, newMaxSeqId=68, maxSeqId=1 2024-11-25T10:30:51,697 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:51,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741890_1076 (size=7089) 2024-11-25T10:30:51,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741890_1076 (size=7089) 2024-11-25T10:30:51,698 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4e397356122b717e0fcde1a373944b96: Waiting for close lock at 1732530651656Running coprocessor pre-close hooks at 1732530651656Disabling compacts and flushes for region at 1732530651656Disabling writes for close at 1732530651656Obtaining lock to block concurrent updates at 1732530651656Preparing flush snapshotting stores in 4e397356122b717e0fcde1a373944b96 at 1732530651656Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96., syncing WAL and waiting on mvcc, flushsize=dataSize=6455, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1732530651657 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 
at 1732530651658 (+1 ms)Flushing 4e397356122b717e0fcde1a373944b96/info: creating writer at 1732530651658Flushing 4e397356122b717e0fcde1a373944b96/info: appending metadata at 1732530651661 (+3 ms)Flushing 4e397356122b717e0fcde1a373944b96/info: closing flushed file at 1732530651661Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f2665d: reopening flushed file at 1732530651674 (+13 ms)Finished flush of dataSize ~6.30 KB/6455, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 4e397356122b717e0fcde1a373944b96 in 26ms, sequenceid=65, compaction requested=false at 1732530651682 (+8 ms)Writing region close event to WAL at 1732530651693 (+11 ms)Running coprocessor post-close hooks at 1732530651697 (+4 ms)Closed at 1732530651697 2024-11-25T10:30:51,698 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732530614882.4e397356122b717e0fcde1a373944b96. 2024-11-25T10:30:51,698 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/info/6a432b304d55456b8b233de92d7d0065 2024-11-25T10:30:51,718 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/ns/f24c54abb3124ca0bdbd2b17d2824dbf is 43, key is default/ns:d/1732530614704/Put/seqid=0 2024-11-25T10:30:51,720 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,720 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741891_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:39809,DS-fa302912-4f1d-451b-b9b2-bf5cfff6fdad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 
2024-11-25T10:30:51,720 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741891_1077 2024-11-25T10:30:51,720 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:51,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741892_1078 (size=5153) 2024-11-25T10:30:51,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741892_1078 (size=5153) 2024-11-25T10:30:51,725 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/ns/f24c54abb3124ca0bdbd2b17d2824dbf 2024-11-25T10:30:51,746 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/table/866c3ad07632420da84f6058e19514d0 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732530615266/Put/seqid=0 2024-11-25T10:30:51,748 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:30:51,748 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1978960096-172.17.0.2-1732530613132:blk_1073741893_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK], DatanodeInfoWithStorage[127.0.0.1:46773,DS-6c37e0a0-0bef-41fc-9ce6-d8e6309d4d80,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK]) is bad. 
2024-11-25T10:30:51,748 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-1978960096-172.17.0.2-1732530613132:blk_1073741893_1079 2024-11-25T10:30:51,748 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37725,DS-dd6078ff-95e5-457f-866e-84f4811f1ac6,DISK] 2024-11-25T10:30:51,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741894_1080 (size=5424) 2024-11-25T10:30:51,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741894_1080 (size=5424) 2024-11-25T10:30:51,753 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/table/866c3ad07632420da84f6058e19514d0 2024-11-25T10:30:51,759 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/info/6a432b304d55456b8b233de92d7d0065 as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/info/6a432b304d55456b8b233de92d7d0065 2024-11-25T10:30:51,764 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/info/6a432b304d55456b8b233de92d7d0065, entries=10, sequenceid=11, filesize=6.9 K 2024-11-25T10:30:51,765 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/ns/f24c54abb3124ca0bdbd2b17d2824dbf as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/ns/f24c54abb3124ca0bdbd2b17d2824dbf 2024-11-25T10:30:51,770 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/ns/f24c54abb3124ca0bdbd2b17d2824dbf, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T10:30:51,771 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/.tmp/table/866c3ad07632420da84f6058e19514d0 as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/table/866c3ad07632420da84f6058e19514d0 2024-11-25T10:30:51,776 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/table/866c3ad07632420da84f6058e19514d0, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T10:30:51,777 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 120ms, sequenceid=11, compaction requested=false 2024-11-25T10:30:51,783 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T10:30:51,784 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:30:51,784 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:51,784 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530651657Running coprocessor pre-close hooks at 1732530651657Disabling compacts and flushes for region at 1732530651657Disabling writes for close at 1732530651657Obtaining lock to block concurrent updates at 1732530651657Preparing flush snapshotting stores in 1588230740 at 1732530651657Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732530651657Flushing stores of hbase:meta,,1.1588230740 at 1732530651676 (+19 ms)Flushing 1588230740/info: creating writer at 1732530651676Flushing 1588230740/info: appending metadata at 1732530651692 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732530651692Flushing 1588230740/ns: creating writer at 1732530651703 (+11 ms)Flushing 1588230740/ns: appending metadata at 1732530651717 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732530651717Flushing 1588230740/table: creating writer at 1732530651731 (+14 ms)Flushing 1588230740/table: appending metadata at 1732530651746 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732530651746Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22b4d846: reopening flushed file at 1732530651758 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ab40557: reopening flushed file at 1732530651765 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62c970e6: reopening flushed file at 1732530651770 (+5 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 120ms, sequenceid=11, compaction requested=false at 1732530651777 (+7 ms)Writing region close event to WAL at 1732530651780 (+3 ms)Running coprocessor post-close hooks at 1732530651784 (+4 ms)Closed at 1732530651784 2024-11-25T10:30:51,784 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T10:30:51,829 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 is not closed yet, will try archiving it next time 2024-11-25T10:30:51,829 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530633601 to hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs/e2fd862922a0%2C35207%2C1732530614007.1732530633601 2024-11-25T10:30:51,857 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,35207,1732530614007; all regions closed. 2024-11-25T10:30:51,857 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,857 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,858 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,858 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,858 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:51,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741889_1074 (size=825) 2024-11-25T10:30:51,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741889_1074 (size=825) 2024-11-25T10:30:51,886 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T10:30:51,886 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T10:30:52,312 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T10:30:52,312 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T10:30:52,314 INFO [regionserver/e2fd862922a0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:30:52,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741862_1045 (size=13591) 2024-11-25T10:30:52,849 INFO [regionserver/e2fd862922a0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:30:52,880 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@13cc8de0[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46773, datanodeUuid=2cbdce5a-1e08-4a2d-aea4-9761244b9c0d, infoPort=42535, infoSecurePort=0, ipcPort=39031, storageInfo=lv=-57;cid=testClusterID;nsid=1228482375;c=1732530613132):Failed to transfer BP-1978960096-172.17.0.2-1732530613132:blk_1073741829_1005 to 127.0.0.1:37725 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:30:52,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:30:53,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:30:54,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-25T10:30:54,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:30:54,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T10:30:54,740 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T10:30:54,740 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T10:30:54,898 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@22a46c2d {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1978960096-172.17.0.2-1732530613132:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:35051,null,null]) java.net.ConnectException: Call From e2fd862922a0/172.17.0.2 to localhost:40229 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-25T10:30:54,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741830_1064 (size=27318) 2024-11-25T10:30:55,659 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 after 4002ms 2024-11-25T10:30:55,670 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta after 4002ms 2024-11-25T10:30:55,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:30:55,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741835_1011 (size=393) 2024-11-25T10:30:56,657 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-25T10:30:56,659 DEBUG [RS:1;e2fd862922a0:35123 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs 2024-11-25T10:30:56,659 INFO [RS:1;e2fd862922a0:35123 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C35123%2C1732530614788:(num 1732530614982) 2024-11-25T10:30:56,659 DEBUG [RS:1;e2fd862922a0:35123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:30:56,660 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:30:56,660 
INFO [RS:1;e2fd862922a0:35123 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:30:56,660 INFO [RS:1;e2fd862922a0:35123 {}] hbase.ChoreService(370): Chore service for: regionserver/e2fd862922a0:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T10:30:56,660 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T10:30:56,660 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:30:56,660 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:30:56,660 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T10:30:56,660 INFO [RS:1;e2fd862922a0:35123 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:30:56,660 INFO [RS:1;e2fd862922a0:35123 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35123 2024-11-25T10:30:56,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,35123,1732530614788 2024-11-25T10:30:56,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:30:56,662 INFO [RS:1;e2fd862922a0:35123 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:30:56,664 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,35123,1732530614788] 2024-11-25T10:30:56,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:30:56,666 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,35123,1732530614788 already deleted, retry=false 2024-11-25T10:30:56,666 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,35123,1732530614788 expired; onlineServers=1 2024-11-25T10:30:56,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,715 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,716 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:56,764 INFO [RS:1;e2fd862922a0:35123 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:30:56,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:56,764 INFO [RS:1;e2fd862922a0:35123 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,35123,1732530614788; zookeeper connection closed. 2024-11-25T10:30:56,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35123-0x1014ead19430002, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:30:56,765 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@38e0629a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@38e0629a 2024-11-25T10:30:56,858 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-25T10:30:56,861 DEBUG [RS:0;e2fd862922a0:35207 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs 2024-11-25T10:30:56,861 INFO [RS:0;e2fd862922a0:35207 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C35207%2C1732530614007.meta:.meta(num 1732530651658) 2024-11-25T10:30:56,862 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:56,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:56,862 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:56,862 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:56,862 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:30:56,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741882_1066 (size=15140) 2024-11-25T10:30:56,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741882_1066 (size=15140) 2024-11-25T10:30:56,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741830_1064 (size=27318) 2024-11-25T10:30:57,218 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:30:57,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,230 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,231 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,232 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:30:57,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:30:57,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:30:57,676 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-25T10:30:58,666 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:30:58,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:30:59,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:30:59,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:30:59,676 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-25T10:31:00,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:00,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:01,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:01,677 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-25T10:31:01,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:01,863 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-25T10:31:01,866 DEBUG [RS:0;e2fd862922a0:35207 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/oldWALs 2024-11-25T10:31:01,866 INFO [RS:0;e2fd862922a0:35207 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C35207%2C1732530614007:(num 1732530651418) 2024-11-25T10:31:01,866 DEBUG [RS:0;e2fd862922a0:35207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:01,866 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:31:01,866 INFO [RS:0;e2fd862922a0:35207 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:31:01,866 INFO [RS:0;e2fd862922a0:35207 {}] hbase.ChoreService(370): Chore service for: regionserver/e2fd862922a0:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T10:31:01,867 INFO [RS:0;e2fd862922a0:35207 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:31:01,867 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
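The repeated RecoverLeaseFSUtils warnings above come from the WAL close path: AbstractFSWAL.recoverLease asks the NameNode to recover the file lease and then polls isFileClosed, which here fails either with "Filesystem closed" or "File does not exist" once the WAL has been moved. The ERROR at 10:31:01,863 also names "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob for how long the writer close is allowed to take. Below is a minimal sketch of that recover-then-poll pattern using the public DistributedFileSystem API rather than HBase's internal helper; the hdfs://namenode:8020 URI, the /hbase/WALs/example-wal path, and the 30-second value are illustrative assumptions, not values from this run.

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class WalLeaseRecoverySketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // The ERROR above suggests raising the writer-close wait; 30 is an illustrative value.
      conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);

      // Hypothetical cluster URI and WAL path, standing in for the
      // hdfs://localhost:46735/.../WALs paths seen in the log above.
      FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
      Path wal = new Path("/hbase/WALs/example-wal");

      if (fs instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // Ask the NameNode to recover the lease; true means the file is already closed.
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered) {
          // isFileClosed throws FileNotFoundException if the WAL was already moved to
          // oldWALs, which is the "File does not exist" failure seen in the traces above.
          if (dfs.isFileClosed(wal)) {
            break;
          }
          Thread.sleep(1000L);
          recovered = dfs.recoverLease(wal);
        }
      }
      fs.close();
    }
  }

In this test run the polling keeps failing because the mini-cluster's DFSClient is already shut down, so no setting of the wait time would let the close succeed; the sketch only illustrates the mechanism the warnings refer to.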
2024-11-25T10:31:01,867 INFO [RS:0;e2fd862922a0:35207 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35207 2024-11-25T10:31:01,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,35207,1732530614007 2024-11-25T10:31:01,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:31:01,869 INFO [RS:0;e2fd862922a0:35207 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:31:01,870 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,35207,1732530614007] 2024-11-25T10:31:01,871 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,35207,1732530614007 already deleted, retry=false 2024-11-25T10:31:01,871 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,35207,1732530614007 expired; onlineServers=0 2024-11-25T10:31:01,871 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2fd862922a0,42939,1732530613957' ***** 2024-11-25T10:31:01,871 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T10:31:01,871 INFO [M:0;e2fd862922a0:42939 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:31:01,872 INFO [M:0;e2fd862922a0:42939 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:31:01,872 DEBUG [M:0;e2fd862922a0:42939 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T10:31:01,872 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T10:31:01,872 DEBUG [M:0;e2fd862922a0:42939 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T10:31:01,872 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530614250 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530614250,5,FailOnTimeoutGroup] 2024-11-25T10:31:01,872 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530614251 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530614251,5,FailOnTimeoutGroup] 2024-11-25T10:31:01,872 INFO [M:0;e2fd862922a0:42939 {}] hbase.ChoreService(370): Chore service for: master/e2fd862922a0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T10:31:01,872 INFO [M:0;e2fd862922a0:42939 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:31:01,872 DEBUG [M:0;e2fd862922a0:42939 {}] master.HMaster(1795): Stopping service threads 2024-11-25T10:31:01,872 INFO [M:0;e2fd862922a0:42939 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T10:31:01,872 INFO [M:0;e2fd862922a0:42939 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:31:01,872 INFO [M:0;e2fd862922a0:42939 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T10:31:01,873 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T10:31:01,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T10:31:01,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:01,874 DEBUG [M:0;e2fd862922a0:42939 {}] zookeeper.ZKUtil(347): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T10:31:01,874 WARN [M:0;e2fd862922a0:42939 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T10:31:01,875 INFO [M:0;e2fd862922a0:42939 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/.lastflushedseqids 2024-11-25T10:31:01,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741895_1081 (size=130) 2024-11-25T10:31:01,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741895_1081 (size=130) 2024-11-25T10:31:01,882 INFO [M:0;e2fd862922a0:42939 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T10:31:01,882 INFO [M:0;e2fd862922a0:42939 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T10:31:01,883 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:31:01,883 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:01,883 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:01,883 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:31:01,883 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:01,883 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-25T10:31:01,899 DEBUG [M:0;e2fd862922a0:42939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12cca40548f64fb48b8ab71f6542fd91 is 82, key is hbase:meta,,1/info:regioninfo/1732530614687/Put/seqid=0 2024-11-25T10:31:01,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741896_1082 (size=5672) 2024-11-25T10:31:01,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741896_1082 (size=5672) 2024-11-25T10:31:01,904 INFO [M:0;e2fd862922a0:42939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12cca40548f64fb48b8ab71f6542fd91 2024-11-25T10:31:01,925 DEBUG [M:0;e2fd862922a0:42939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a1687442ce646cd89957e295fa97017 is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732530615271/Put/seqid=0 2024-11-25T10:31:01,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741897_1083 (size=6256) 2024-11-25T10:31:01,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741897_1083 (size=6256) 2024-11-25T10:31:01,931 INFO [M:0;e2fd862922a0:42939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a1687442ce646cd89957e295fa97017 2024-11-25T10:31:01,936 INFO [M:0;e2fd862922a0:42939 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3a1687442ce646cd89957e295fa97017 2024-11-25T10:31:01,951 DEBUG [M:0;e2fd862922a0:42939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d619797561aa4a968a9d8380cbf2837d is 69, key is e2fd862922a0,35123,1732530614788/rs:state/1732530614831/Put/seqid=0 2024-11-25T10:31:01,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741898_1084 (size=5224) 2024-11-25T10:31:01,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741898_1084 (size=5224) 2024-11-25T10:31:01,957 INFO [M:0;e2fd862922a0:42939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d619797561aa4a968a9d8380cbf2837d 2024-11-25T10:31:01,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:01,970 INFO [RS:0;e2fd862922a0:35207 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:31:01,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35207-0x1014ead19430001, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:01,970 INFO [RS:0;e2fd862922a0:35207 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,35207,1732530614007; zookeeper connection closed. 2024-11-25T10:31:01,971 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@407f3572 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@407f3572 2024-11-25T10:31:01,971 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-25T10:31:01,977 DEBUG [M:0;e2fd862922a0:42939 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/38335b708d1743e09b31cdbb5a7bc270 is 52, key is load_balancer_on/state:d/1732530614746/Put/seqid=0 2024-11-25T10:31:01,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741899_1085 (size=5056) 2024-11-25T10:31:01,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741899_1085 (size=5056) 2024-11-25T10:31:01,982 INFO [M:0;e2fd862922a0:42939 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/38335b708d1743e09b31cdbb5a7bc270 2024-11-25T10:31:01,988 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12cca40548f64fb48b8ab71f6542fd91 as 
hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12cca40548f64fb48b8ab71f6542fd91 2024-11-25T10:31:01,993 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12cca40548f64fb48b8ab71f6542fd91, entries=8, sequenceid=60, filesize=5.5 K 2024-11-25T10:31:01,994 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3a1687442ce646cd89957e295fa97017 as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a1687442ce646cd89957e295fa97017 2024-11-25T10:31:01,999 INFO [M:0;e2fd862922a0:42939 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3a1687442ce646cd89957e295fa97017 2024-11-25T10:31:01,999 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3a1687442ce646cd89957e295fa97017, entries=6, sequenceid=60, filesize=6.1 K 2024-11-25T10:31:02,000 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d619797561aa4a968a9d8380cbf2837d as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d619797561aa4a968a9d8380cbf2837d 2024-11-25T10:31:02,005 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d619797561aa4a968a9d8380cbf2837d, entries=2, sequenceid=60, filesize=5.1 K 2024-11-25T10:31:02,006 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/38335b708d1743e09b31cdbb5a7bc270 as hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/38335b708d1743e09b31cdbb5a7bc270 2024-11-25T10:31:02,011 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/38335b708d1743e09b31cdbb5a7bc270, entries=1, sequenceid=60, filesize=4.9 K 2024-11-25T10:31:02,012 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false 2024-11-25T10:31:02,014 INFO [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T10:31:02,014 DEBUG [M:0;e2fd862922a0:42939 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530661883Disabling compacts and flushes for region at 1732530661883Disabling writes for close at 1732530661883Obtaining lock to block concurrent updates at 1732530661883Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732530661883Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732530661883Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732530661884 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732530661884Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732530661898 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732530661898Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732530661910 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732530661924 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732530661924Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732530661937 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732530661951 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732530661951Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732530661962 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732530661977 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732530661977Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@294938e2: reopening flushed file at 1732530661987 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@342d1b68: reopening flushed file at 1732530661993 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9c6c46a: reopening flushed file at 1732530661999 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78a4a5ca: reopening flushed file at 1732530662005 (+6 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=60, compaction requested=false at 1732530662012 (+7 ms)Writing region close event to WAL at 1732530662014 (+2 ms)Closed at 1732530662014 2024-11-25T10:31:02,014 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:02,014 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:02,015 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:02,015 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:02,015 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:02,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39809 is added to blk_1073741880_1063 (size=1045) 2024-11-25T10:31:02,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46773 is added to blk_1073741880_1063 (size=1045) 2024-11-25T10:31:02,018 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T10:31:02,018 INFO [M:0;e2fd862922a0:42939 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T10:31:02,018 INFO [M:0;e2fd862922a0:42939 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42939 2024-11-25T10:31:02,018 INFO [M:0;e2fd862922a0:42939 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:31:02,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:02,120 INFO [M:0;e2fd862922a0:42939 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:31:02,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42939-0x1014ead19430000, quorum=127.0.0.1:64111, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:02,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6869cf12{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:02,123 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17a19f67{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:02,123 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:02,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a48a14c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:02,123 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@616b5398{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:02,125 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:31:02,125 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:02,125 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:02,125 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1978960096-172.17.0.2-1732530613132 (Datanode Uuid 2cbdce5a-1e08-4a2d-aea4-9761244b9c0d) service to localhost/127.0.0.1:46735 2024-11-25T10:31:02,125 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data3/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:02,126 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data4/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:02,126 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:02,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@69de1683{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:02,128 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2801262{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:02,128 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:02,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41559526{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:02,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25509568{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:02,130 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:02,130 WARN [BP-1978960096-172.17.0.2-1732530613132 heartbeating to localhost/127.0.0.1:46735 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1978960096-172.17.0.2-1732530613132 (Datanode Uuid 2c123028-200c-43b3-8f7b-64b4bc0bd9b4) service to localhost/127.0.0.1:46735 2024-11-25T10:31:02,130 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
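Editor's note: the "sleep interrupted" warnings from the refreshUsed threads reflect a common shutdown pattern, a background refresher that sleeps between scans and treats interruption as its stop signal. The sketch below is illustrative only and is not Hadoop's actual fs.CachingGetSpaceUsed implementation; the disk scan is a placeholder.

// Sketch of the shutdown pattern behind the "sleep interrupted" warnings above:
// a periodic refresh thread that exits cleanly when interrupted.
public class DiskUsageRefresher implements Runnable {
    private volatile long usedBytes;

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            usedBytes = computeUsedBytes();
            try {
                Thread.sleep(10_000); // refresh interval between scans
            } catch (InterruptedException e) {
                // Mirrors the logged warning ("sleep interrupted"), then exits the loop.
                System.err.println("Thread Interrupted waiting to refresh disk information: " + e.getMessage());
                Thread.currentThread().interrupt();
            }
        }
    }

    private long computeUsedBytes() {
        // Placeholder for an actual "du"-style scan of a datanode data directory.
        return new java.io.File(".").getTotalSpace() - new java.io.File(".").getFreeSpace();
    }

    public static void main(String[] args) throws InterruptedException {
        DiskUsageRefresher refresher = new DiskUsageRefresher();
        Thread t = new Thread(refresher, "refreshUsed-demo");
        t.start();
        Thread.sleep(500);
        t.interrupt();   // as happens when the block pool service shuts down
        t.join();
        System.out.println("last sample: " + refresher.usedBytes + " bytes");
    }
}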
2024-11-25T10:31:02,130 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:02,130 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data9/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:02,131 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/cluster_f67419b6-194d-cd6d-3ad5-3b380326cd86/data/data10/current/BP-1978960096-172.17.0.2-1732530613132 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:02,131 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:02,137 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a640c70{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:31:02,137 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@26313eab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:02,137 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:02,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17c48ca{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:02,138 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e7025d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:02,146 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T10:31:02,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T10:31:02,182 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 78) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46735 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46735 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007f2f74bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007f2f74bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46735 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46735 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:46735 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39899 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39899 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$897/0x00007f2f74bef800.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46735 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46735 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:46735 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46735 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46735 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46735 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=440 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=102 (was 217), ProcessCount=11 (was 11), AvailableMemoryMB=1721 (was 2278) 2024-11-25T10:31:02,189 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=440, MaxFileDescriptor=1048576, SystemLoadAverage=102, ProcessCount=11, AvailableMemoryMB=1721 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.log.dir so I do NOT create it in target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/cc32b973-db37-3a44-e227-85b0d99bb8ce/hadoop.tmp.dir so I do NOT create it in target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283, deleteOnExit=true 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/test.cache.data in system properties and HBase conf 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir in system properties and HBase conf 2024-11-25T10:31:02,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T10:31:02,191 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/nfs.dump.dir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir in system properties and HBase conf 2024-11-25T10:31:02,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:31:02,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T10:31:02,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T10:31:02,205 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:31:02,287 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:02,291 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:02,293 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:02,293 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:02,293 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:31:02,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:02,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48d7ddcf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:02,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c727387{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:02,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d60493b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir/jetty-localhost-33857-hadoop-hdfs-3_4_1-tests_jar-_-any-15256833353339749974/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:31:02,410 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14bd1a08{HTTP/1.1, (http/1.1)}{localhost:33857} 2024-11-25T10:31:02,410 INFO [Time-limited test {}] server.Server(415): Started @153923ms 2024-11-25T10:31:02,424 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:31:02,519 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:02,523 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:02,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:02,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:02,524 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:31:02,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5485abbd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:02,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29ec962a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:02,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:02,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ff32a1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir/jetty-localhost-35865-hadoop-hdfs-3_4_1-tests_jar-_-any-14780936137854717792/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:02,646 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b09b5f3{HTTP/1.1, (http/1.1)}{localhost:35865} 2024-11-25T10:31:02,647 INFO [Time-limited test {}] server.Server(415): Started @154159ms 2024-11-25T10:31:02,648 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:31:02,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:02,675 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:02,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:02,679 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:02,680 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:02,680 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:02,680 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:31:02,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a99c341{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:02,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c7a3196{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:02,760 WARN [Thread-1186 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data2/current/BP-1372184995-172.17.0.2-1732530662222/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:02,760 WARN [Thread-1185 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data1/current/BP-1372184995-172.17.0.2-1732530662222/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:02,777 WARN [Thread-1164 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:31:02,779 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea8211c6d5b75afe with lease ID 0xdac11801f85f1a15: Processing first storage report for DS-59eba3af-66fd-4286-b40d-73839eeaf9a8 from datanode DatanodeRegistration(127.0.0.1:33621, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=38703, infoSecurePort=0, ipcPort=36429, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222) 2024-11-25T10:31:02,779 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea8211c6d5b75afe with lease ID 0xdac11801f85f1a15: from storage DS-59eba3af-66fd-4286-b40d-73839eeaf9a8 node DatanodeRegistration(127.0.0.1:33621, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=38703, infoSecurePort=0, ipcPort=36429, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:02,780 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea8211c6d5b75afe with lease ID 0xdac11801f85f1a15: Processing first storage report for DS-08943d17-775e-4a78-b7a9-2c4a0248b017 from datanode DatanodeRegistration(127.0.0.1:33621, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=38703, infoSecurePort=0, ipcPort=36429, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222) 2024-11-25T10:31:02,780 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea8211c6d5b75afe with lease ID 0xdac11801f85f1a15: from storage DS-08943d17-775e-4a78-b7a9-2c4a0248b017 node DatanodeRegistration(127.0.0.1:33621, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=38703, infoSecurePort=0, ipcPort=36429, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:02,799 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70df7796{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir/jetty-localhost-35579-hadoop-hdfs-3_4_1-tests_jar-_-any-10769681474765165397/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:02,799 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@390ee286{HTTP/1.1, (http/1.1)}{localhost:35579} 2024-11-25T10:31:02,799 INFO [Time-limited test {}] server.Server(415): Started @154312ms 2024-11-25T10:31:02,801 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
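
The repeated Close-WAL-Writer-0 WARN traces above all have the same shape: RecoverLeaseFSUtils calls an HDFS method through reflection, the underlying DFSClient is already closed, and the real error ("java.io.IOException: Filesystem closed") only appears as the cause of an InvocationTargetException. A minimal, self-contained Java sketch of that wrapping behavior; the ClosedFs class and the path are placeholders, not HBase or HDFS code:

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveInvokeDemo {

      // Stand-in for DistributedFileSystem.isFileClosed(Path); it fails the same
      // way a DFSClient does after the filesystem has been closed.
      public static class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
          throw new IOException("Filesystem closed");
        }
      }

      public static void main(String[] args) throws Exception {
        ClosedFs fs = new ClosedFs();
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
          m.invoke(fs, "/user/jenkins/test-data/example.wal");
        } catch (InvocationTargetException e) {
          // The reflective layer only reports InvocationTargetException; the real
          // failure is attached as the cause, matching the WARN traces above.
          System.out.println("wrapper: " + e);
          System.out.println("cause:   " + e.getCause());
        }
      }
    }
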
2024-11-25T10:31:02,894 WARN [Thread-1211 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data3/current/BP-1372184995-172.17.0.2-1732530662222/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:02,894 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data4/current/BP-1372184995-172.17.0.2-1732530662222/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:02,910 WARN [Thread-1200 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:31:02,912 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7cea1190498af30 with lease ID 0xdac11801f85f1a16: Processing first storage report for DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7 from datanode DatanodeRegistration(127.0.0.1:35795, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=41027, infoSecurePort=0, ipcPort=39121, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222) 2024-11-25T10:31:02,912 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7cea1190498af30 with lease ID 0xdac11801f85f1a16: from storage DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7 node DatanodeRegistration(127.0.0.1:35795, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=41027, infoSecurePort=0, ipcPort=39121, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:02,913 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7cea1190498af30 with lease ID 0xdac11801f85f1a16: Processing first storage report for DS-9f9fc9ec-e253-4b30-9b94-9a569805a8e7 from datanode DatanodeRegistration(127.0.0.1:35795, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=41027, infoSecurePort=0, ipcPort=39121, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222) 2024-11-25T10:31:02,913 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7cea1190498af30 with lease ID 0xdac11801f85f1a16: from storage DS-9f9fc9ec-e253-4b30-9b94-9a569805a8e7 node DatanodeRegistration(127.0.0.1:35795, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=41027, infoSecurePort=0, ipcPort=39121, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:31:02,924 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96 2024-11-25T10:31:02,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/zookeeper_0, clientPort=56140, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T10:31:02,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56140 2024-11-25T10:31:02,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:02,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:02,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:31:02,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:31:02,943 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79 with version=8 2024-11-25T10:31:02,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase-staging 2024-11-25T10:31:02,946 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:31:02,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:02,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:02,947 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:31:02,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:02,947 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:31:02,947 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T10:31:02,947 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:31:02,948 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35975 2024-11-25T10:31:02,949 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35975 connecting to ZooKeeper ensemble=127.0.0.1:56140 2024-11-25T10:31:02,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359750x0, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:31:02,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35975-0x1014eadd89d0000 connected 2024-11-25T10:31:02,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:02,974 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:02,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:31:02,975 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79, hbase.cluster.distributed=false 2024-11-25T10:31:02,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:31:02,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35975 2024-11-25T10:31:02,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35975 2024-11-25T10:31:02,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35975 2024-11-25T10:31:02,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35975 2024-11-25T10:31:02,981 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35975 2024-11-25T10:31:02,996 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:31:02,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:02,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:02,996 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:31:02,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:02,996 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:31:02,997 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:31:02,997 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:31:02,997 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38959 2024-11-25T10:31:02,998 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38959 connecting to ZooKeeper ensemble=127.0.0.1:56140 2024-11-25T10:31:02,999 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:03,000 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:03,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:389590x0, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:31:03,004 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:389590x0, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:31:03,004 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38959-0x1014eadd89d0001 connected 2024-11-25T10:31:03,005 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:31:03,005 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:31:03,006 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T10:31:03,006 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:31:03,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38959 2024-11-25T10:31:03,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38959 2024-11-25T10:31:03,007 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38959 2024-11-25T10:31:03,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38959 2024-11-25T10:31:03,008 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38959 2024-11-25T10:31:03,019 
DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2fd862922a0:35975 2024-11-25T10:31:03,020 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:03,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:03,021 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T10:31:03,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,023 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:31:03,024 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2fd862922a0,35975,1732530662946 from backup master directory 2024-11-25T10:31:03,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:03,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:03,026 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
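
The ZKUtil/ZKWatcher entries above repeatedly "set watcher on znode that does not yet exist" (/hbase/running, /hbase/master, /hbase/acl) and later receive NodeCreated and NodeChildrenChanged events for those paths. A rough sketch of that watch-before-create pattern with the plain ZooKeeper client; the connect string and the sleep are placeholders, and it assumes a ZooKeeper server is actually reachable:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class WatchMissingZNode {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event: type=" + event.getType() + ", path=" + event.getPath());

        // Placeholder connect string; the mini cluster above listens on 127.0.0.1:56140.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);

        // exists() returns null for a missing node but still registers the watch,
        // so a later create of /hbase/running fires a NodeCreated event.
        Stat stat = zk.exists("/hbase/running", true);
        System.out.println("znode present? " + (stat != null));

        Thread.sleep(10_000); // keep the session alive long enough to observe events
        zk.close();
      }
    }
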
2024-11-25T10:31:03,027 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,031 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/hbase.id] with ID: b8f222a0-56f2-431b-b264-eb135a069c13 2024-11-25T10:31:03,031 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/.tmp/hbase.id 2024-11-25T10:31:03,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:31:03,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:31:03,037 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/.tmp/hbase.id]:[hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/hbase.id] 2024-11-25T10:31:03,048 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:03,048 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T10:31:03,049 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
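
The cluster ID bootstrap above writes hbase.id to a .tmp location first and only then moves it to its final path, so a reader never observes a partially written file. A small sketch of that write-then-rename idiom against the generic Hadoop FileSystem API; the local filesystem, paths, and file contents are stand-ins chosen so the sketch runs without an HDFS cluster:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRename {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Local filesystem keeps the sketch runnable without an HDFS cluster.
        FileSystem fs = FileSystem.getLocal(conf);

        Path tmp = new Path("/tmp/write-then-rename-demo/.tmp/hbase.id");
        Path dst = new Path("/tmp/write-then-rename-demo/hbase.id");
        fs.delete(dst, false); // ignore result; makes the demo rerunnable

        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("example-cluster-id".getBytes(StandardCharsets.UTF_8));
        }

        // Publish in one step; on HDFS a rename is atomic, which is what makes
        // the temp-file-plus-rename idiom safe for readers.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
        System.out.println("cluster id file at " + fs.makeQualified(dst));
      }
    }
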
2024-11-25T10:31:03,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:31:03,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:31:03,059 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:31:03,059 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T10:31:03,060 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:31:03,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:31:03,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:31:03,067 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store 2024-11-25T10:31:03,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:31:03,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:31:03,074 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:03,074 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:31:03,074 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:03,074 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:03,074 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:31:03,074 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:03,074 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
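
The close sequence above (time-limited wait for the close lock, updates disabled, region closed) is essentially a quiesce-then-close protocol. A generic Java sketch of that idea using a read/write lock; it illustrates the pattern only, is not the HRegion implementation, and every name in it is invented:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class CloseLockSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private volatile boolean closed;

      // Normal mutations share the read lock.
      void doUpdate() {
        lock.readLock().lock();
        try {
          if (closed) throw new IllegalStateException("already closed");
          // ... mutate in-memory state here ...
        } finally {
          lock.readLock().unlock();
        }
      }

      // close() takes the write lock with a bound, mirroring the
      // "Time limited wait for close lock" / "Acquired close lock after waiting N ms"
      // entries above; once it is held, no further updates can start.
      boolean close(long timeoutMs) throws InterruptedException {
        long start = System.currentTimeMillis();
        if (!lock.writeLock().tryLock(timeoutMs, TimeUnit.MILLISECONDS)) {
          return false; // could not quiesce in time
        }
        try {
          closed = true;
          System.out.println("acquired close lock after waiting "
              + (System.currentTimeMillis() - start) + " ms");
          return true;
        } finally {
          lock.writeLock().unlock();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        CloseLockSketch region = new CloseLockSketch();
        region.doUpdate();
        System.out.println("closed cleanly: " + region.close(5_000));
      }
    }
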
2024-11-25T10:31:03,074 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530663074Disabling compacts and flushes for region at 1732530663074Disabling writes for close at 1732530663074Writing region close event to WAL at 1732530663074Closed at 1732530663074 2024-11-25T10:31:03,075 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/.initializing 2024-11-25T10:31:03,075 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,077 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C35975%2C1732530662946, suffix=, logDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946, archiveDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/oldWALs, maxLogs=10 2024-11-25T10:31:03,078 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35975%2C1732530662946.1732530663078 2024-11-25T10:31:03,082 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 2024-11-25T10:31:03,083 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41027:41027),(127.0.0.1/127.0.0.1:38703:38703)] 2024-11-25T10:31:03,084 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:31:03,084 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:03,084 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,084 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,086 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T10:31:03,087 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T10:31:03,089 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:03,089 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,090 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T10:31:03,090 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:03,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T10:31:03,092 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,092 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:03,092 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,093 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,093 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,094 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,094 DEBUG [master/e2fd862922a0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,095 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T10:31:03,096 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:03,098 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:31:03,098 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734687, jitterRate=-0.06579789519309998}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T10:31:03,099 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732530663085Initializing all the Stores at 1732530663085Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530663085Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530663085Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530663085Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530663085Cleaning up temporary data from old regions at 1732530663094 (+9 ms)Region opened successfully at 1732530663099 (+5 ms) 2024-11-25T10:31:03,099 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T10:31:03,102 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30f2763b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:31:03,103 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T10:31:03,103 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T10:31:03,103 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T10:31:03,103 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T10:31:03,104 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T10:31:03,104 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T10:31:03,104 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T10:31:03,106 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T10:31:03,107 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T10:31:03,108 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T10:31:03,108 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T10:31:03,109 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T10:31:03,110 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T10:31:03,110 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T10:31:03,111 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T10:31:03,113 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T10:31:03,114 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T10:31:03,115 DEBUG 
[master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T10:31:03,117 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T10:31:03,119 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T10:31:03,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:31:03,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:31:03,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,121 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2fd862922a0,35975,1732530662946, sessionid=0x1014eadd89d0000, setting cluster-up flag (Was=false) 2024-11-25T10:31:03,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,130 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T10:31:03,131 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,139 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T10:31:03,140 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,141 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T10:31:03,143 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:03,143 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T10:31:03,143 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T10:31:03,143 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2fd862922a0,35975,1732530662946 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2fd862922a0:0, corePoolSize=10, maxPoolSize=10 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:31:03,145 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732530693147 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T10:31:03,147 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T10:31:03,147 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T10:31:03,147 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-25T10:31:03,148 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T10:31:03,148 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T10:31:03,148 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T10:31:03,148 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,148 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T10:31:03,148 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T10:31:03,148 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T10:31:03,150 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530663148,5,FailOnTimeoutGroup] 2024-11-25T10:31:03,150 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530663150,5,FailOnTimeoutGroup] 2024-11-25T10:31:03,150 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,150 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T10:31:03,150 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,150 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:31:03,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:31:03,156 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T10:31:03,157 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79 2024-11-25T10:31:03,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:31:03,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:31:03,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:03,165 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:31:03,166 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:31:03,166 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,166 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:31:03,168 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:31:03,168 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,169 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:31:03,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:31:03,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:31:03,172 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:31:03,172 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:31:03,173 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740 2024-11-25T10:31:03,174 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740 2024-11-25T10:31:03,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:31:03,175 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:31:03,176 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:31:03,177 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:31:03,179 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:31:03,179 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814955, jitterRate=0.03626890480518341}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:31:03,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732530663164Initializing all the Stores at 1732530663164Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530663164Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530663164Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530663164Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530663165 (+1 ms)Cleaning up temporary data from old regions at 1732530663175 (+10 ms)Region opened successfully at 1732530663180 (+5 ms) 2024-11-25T10:31:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:31:03,181 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:31:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:31:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:31:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:31:03,181 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:31:03,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530663181Disabling compacts and flushes for region at 
1732530663181Disabling writes for close at 1732530663181Writing region close event to WAL at 1732530663181Closed at 1732530663181 2024-11-25T10:31:03,183 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:03,183 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T10:31:03,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T10:31:03,185 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:31:03,186 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T10:31:03,210 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(746): ClusterId : b8f222a0-56f2-431b-b264-eb135a069c13 2024-11-25T10:31:03,210 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:31:03,212 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:31:03,212 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:31:03,214 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:31:03,214 DEBUG [RS:0;e2fd862922a0:38959 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@193e01a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:31:03,227 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2fd862922a0:38959 2024-11-25T10:31:03,227 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:31:03,227 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:31:03,227 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T10:31:03,228 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,35975,1732530662946 with port=38959, startcode=1732530662996 2024-11-25T10:31:03,228 DEBUG [RS:0;e2fd862922a0:38959 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:31:03,230 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59551, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:31:03,231 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35975 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,231 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35975 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,232 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79 2024-11-25T10:31:03,232 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41637 2024-11-25T10:31:03,232 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:31:03,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:31:03,236 DEBUG [RS:0;e2fd862922a0:38959 {}] zookeeper.ZKUtil(111): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,236 WARN [RS:0;e2fd862922a0:38959 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:31:03,236 INFO [RS:0;e2fd862922a0:38959 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:31:03,236 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,236 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,38959,1732530662996] 2024-11-25T10:31:03,240 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:31:03,241 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:31:03,242 INFO [RS:0;e2fd862922a0:38959 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:31:03,242 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T10:31:03,242 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:31:03,243 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:31:03,243 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:31:03,243 DEBUG [RS:0;e2fd862922a0:38959 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:31:03,248 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T10:31:03,248 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,248 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,249 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,249 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,249 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,38959,1732530662996-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:31:03,264 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:31:03,264 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,38959,1732530662996-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,264 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,264 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.Replication(171): e2fd862922a0,38959,1732530662996 started 2024-11-25T10:31:03,278 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,278 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,38959,1732530662996, RpcServer on e2fd862922a0/172.17.0.2:38959, sessionid=0x1014eadd89d0001 2024-11-25T10:31:03,279 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:31:03,279 DEBUG [RS:0;e2fd862922a0:38959 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,279 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,38959,1732530662996' 2024-11-25T10:31:03,279 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:31:03,279 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:31:03,280 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:31:03,280 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:31:03,280 DEBUG [RS:0;e2fd862922a0:38959 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,280 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,38959,1732530662996' 2024-11-25T10:31:03,280 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:31:03,280 DEBUG 
[RS:0;e2fd862922a0:38959 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:31:03,280 DEBUG [RS:0;e2fd862922a0:38959 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:31:03,280 INFO [RS:0;e2fd862922a0:38959 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:31:03,280 INFO [RS:0;e2fd862922a0:38959 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T10:31:03,336 WARN [e2fd862922a0:35975 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T10:31:03,383 INFO [RS:0;e2fd862922a0:38959 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C38959%2C1732530662996, suffix=, logDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996, archiveDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/oldWALs, maxLogs=32 2024-11-25T10:31:03,384 INFO [RS:0;e2fd862922a0:38959 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:03,390 INFO [RS:0;e2fd862922a0:38959 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:03,390 DEBUG [RS:0;e2fd862922a0:38959 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38703:38703),(127.0.0.1/127.0.0.1:41027:41027)] 2024-11-25T10:31:03,586 DEBUG [e2fd862922a0:35975 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T10:31:03,587 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,588 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,38959,1732530662996, state=OPENING 2024-11-25T10:31:03,590 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T10:31:03,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:03,592 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:31:03,592 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:03,592 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:03,592 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,38959,1732530662996}] 2024-11-25T10:31:03,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:03,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:03,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:03,745 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T10:31:03,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35331, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T10:31:03,751 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T10:31:03,751 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:31:03,753 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C38959%2C1732530662996.meta, suffix=.meta, logDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996, archiveDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/oldWALs, maxLogs=32 2024-11-25T10:31:03,754 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta 2024-11-25T10:31:03,760 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta 2024-11-25T10:31:03,762 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41027:41027),(127.0.0.1/127.0.0.1:38703:38703)] 2024-11-25T10:31:03,768 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:31:03,769 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T10:31:03,769 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T10:31:03,769 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T10:31:03,769 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T10:31:03,769 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:03,769 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T10:31:03,769 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T10:31:03,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:31:03,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:31:03,771 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:31:03,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:31:03,772 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:31:03,774 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:31:03,774 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:03,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:31:03,775 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:31:03,775 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,776 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T10:31:03,776 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:31:03,777 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740 2024-11-25T10:31:03,778 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740 2024-11-25T10:31:03,779 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:31:03,779 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:31:03,780 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:31:03,781 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:31:03,782 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805067, jitterRate=0.023696407675743103}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:31:03,782 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T10:31:03,782 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732530663769Writing region info on filesystem at 1732530663769Initializing all the Stores at 1732530663770 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530663770Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530663770Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530663770Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530663770Cleaning up temporary data from old regions at 1732530663779 (+9 ms)Running coprocessor post-open hooks at 1732530663782 (+3 ms)Region opened successfully at 1732530663782 2024-11-25T10:31:03,784 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732530663745 2024-11-25T10:31:03,786 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T10:31:03,786 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T10:31:03,787 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,788 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,38959,1732530662996, state=OPEN 2024-11-25T10:31:03,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:31:03,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:31:03,793 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2fd862922a0,38959,1732530662996 2024-11-25T10:31:03,793 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:03,793 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:03,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T10:31:03,796 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,38959,1732530662996 in 201 msec 2024-11-25T10:31:03,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T10:31:03,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 613 msec 2024-11-25T10:31:03,799 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:03,799 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T10:31:03,800 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:31:03,801 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,38959,1732530662996, seqNum=-1] 2024-11-25T10:31:03,801 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:31:03,802 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47411, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:31:03,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 665 msec 2024-11-25T10:31:03,808 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732530663808, completionTime=-1 2024-11-25T10:31:03,808 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T10:31:03,808 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732530723810 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732530783810 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35975,1732530662946-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35975,1732530662946-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35975,1732530662946-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2fd862922a0:35975, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:31:03,810 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-25T10:31:03,811 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-25T10:31:03,813 DEBUG [master/e2fd862922a0:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-25T10:31:03,814 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.787sec
2024-11-25T10:31:03,814 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-25T10:31:03,814 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-25T10:31:03,814 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-25T10:31:03,814 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-25T10:31:03,814 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-25T10:31:03,814 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35975,1732530662946-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-25T10:31:03,815 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35975,1732530662946-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-25T10:31:03,817 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-25T10:31:03,817 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-25T10:31:03,817 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,35975,1732530662946-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
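The ChoreService entries above come from the master scheduling its periodic background tasks. A minimal sketch of how such a chore is defined and scheduled is shown below, assuming the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore(...) that these messages suggest; the DemoChore name and the periods are made up for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Trivial Stoppable so the chore has a lifecycle owner, mirroring how the master owns its chores.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // name/period follow the "Chore ScheduledChore name=..., period=..." pattern logged above.
        ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore); // scheduling is what produces the "Chore ... is enabled." messages above
        Thread.sleep(3000);
        stopper.stop("done");
        service.shutdown();
      }
    }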
2024-11-25T10:31:03,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b5496d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:31:03,910 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2fd862922a0,35975,-1 for getting cluster id 2024-11-25T10:31:03,910 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T10:31:03,912 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b8f222a0-56f2-431b-b264-eb135a069c13' 2024-11-25T10:31:03,913 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T10:31:03,913 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b8f222a0-56f2-431b-b264-eb135a069c13" 2024-11-25T10:31:03,913 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eec9ec9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:31:03,913 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2fd862922a0,35975,-1] 2024-11-25T10:31:03,913 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T10:31:03,914 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:03,915 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T10:31:03,916 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be3b2f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:31:03,916 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:31:03,917 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,38959,1732530662996, seqNum=-1] 2024-11-25T10:31:03,918 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:31:03,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:31:03,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,921 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:03,924 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T10:31:03,924 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-25T10:31:03,924 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-25T10:31:03,924 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T10:31:03,925 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is e2fd862922a0,35975,1732530662946 2024-11-25T10:31:03,925 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4bdf6a61 2024-11-25T10:31:03,925 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T10:31:03,927 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44546, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T10:31:03,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35975 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T10:31:03,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35975 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
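The two TableDescriptorChecker warnings are raised because the test runs with deliberately tiny region settings. Below is a minimal sketch of the kind of configuration override that would produce exactly the flagged values; where the test actually sets them is not visible in this log, so treat the snippet as an assumption rather than the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionSettings {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Tiny values matching the ones reported by TableDescriptorChecker above,
        // presumably to force frequent flushes and splits during the log-rolling test.
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // flagged as too small -> over-splitting
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // flagged as too small -> frequent flushing
        System.out.println(conf.get("hbase.hregion.max.filesize"));
      }
    }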
2024-11-25T10:31:03,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35975 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:31:03,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35975 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T10:31:03,930 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T10:31:03,930 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:03,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35975 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-25T10:31:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35975 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:31:03,931 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T10:31:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741835_1011 (size=395) 2024-11-25T10:31:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741835_1011 (size=395) 2024-11-25T10:31:03,940 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2daf7a55b6b3eca8ca14b1de86eb6baf, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79 2024-11-25T10:31:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33621 is added to blk_1073741836_1012 (size=78) 2024-11-25T10:31:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35795 is added to blk_1073741836_1012 (size=78) 2024-11-25T10:31:03,947 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:03,947 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 2daf7a55b6b3eca8ca14b1de86eb6baf, disabling compactions & flushes 2024-11-25T10:31:03,947 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:03,947 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:03,947 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. after waiting 0 ms 2024-11-25T10:31:03,947 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:03,947 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:03,947 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2daf7a55b6b3eca8ca14b1de86eb6baf: Waiting for close lock at 1732530663947Disabling compacts and flushes for region at 1732530663947Disabling writes for close at 1732530663947Writing region close event to WAL at 1732530663947Closed at 1732530663947 2024-11-25T10:31:03,949 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T10:31:03,949 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732530663949"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732530663949"}]},"ts":"1732530663949"} 2024-11-25T10:31:03,952 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
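The HMaster entry above records the create-table request together with its full descriptor. For illustration, a client-side call that would submit an equivalent descriptor (same table name, a single 'info' family with VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=65536, no compression or encoding) looks roughly like the sketch below; it is written against the standard Admin API and is not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the descriptor logged by HMaster above.
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)
                  .setBloomFilterType(BloomType.ROW)
                  .setBlocksize(65536)
                  .build())
              .build();
          admin.createTable(td); // submits a CreateTableProcedure like the pid=4 one recorded above
        }
      }
    }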
2024-11-25T10:31:03,953 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T10:31:03,953 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530663953"}]},"ts":"1732530663953"} 2024-11-25T10:31:03,955 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-25T10:31:03,956 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2daf7a55b6b3eca8ca14b1de86eb6baf, ASSIGN}] 2024-11-25T10:31:03,957 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2daf7a55b6b3eca8ca14b1de86eb6baf, ASSIGN 2024-11-25T10:31:03,958 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2daf7a55b6b3eca8ca14b1de86eb6baf, ASSIGN; state=OFFLINE, location=e2fd862922a0,38959,1732530662996; forceNewPlan=false, retain=false 2024-11-25T10:31:04,109 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2daf7a55b6b3eca8ca14b1de86eb6baf, regionState=OPENING, regionLocation=e2fd862922a0,38959,1732530662996 2024-11-25T10:31:04,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2daf7a55b6b3eca8ca14b1de86eb6baf, ASSIGN because future has completed 2024-11-25T10:31:04,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2daf7a55b6b3eca8ca14b1de86eb6baf, server=e2fd862922a0,38959,1732530662996}] 2024-11-25T10:31:04,270 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 
2024-11-25T10:31:04,270 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2daf7a55b6b3eca8ca14b1de86eb6baf, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:31:04,270 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,270 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:04,270 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,270 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,272 INFO [StoreOpener-2daf7a55b6b3eca8ca14b1de86eb6baf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,273 INFO [StoreOpener-2daf7a55b6b3eca8ca14b1de86eb6baf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2daf7a55b6b3eca8ca14b1de86eb6baf columnFamilyName info 2024-11-25T10:31:04,273 DEBUG [StoreOpener-2daf7a55b6b3eca8ca14b1de86eb6baf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:04,274 INFO [StoreOpener-2daf7a55b6b3eca8ca14b1de86eb6baf-1 {}] regionserver.HStore(327): Store=2daf7a55b6b3eca8ca14b1de86eb6baf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:04,274 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,274 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,275 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,275 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,275 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,277 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,279 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:31:04,280 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2daf7a55b6b3eca8ca14b1de86eb6baf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804603, jitterRate=0.023105904459953308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T10:31:04,280 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:04,281 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2daf7a55b6b3eca8ca14b1de86eb6baf: Running coprocessor pre-open hook at 1732530664270Writing region info on filesystem at 1732530664270Initializing all the Stores at 1732530664271 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530664271Cleaning up temporary data from old regions at 1732530664275 (+4 ms)Running coprocessor post-open hooks at 1732530664280 (+5 ms)Region opened successfully at 1732530664281 (+1 ms) 2024-11-25T10:31:04,282 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf., pid=6, masterSystemTime=1732530664266 2024-11-25T10:31:04,284 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:04,284 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:04,285 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2daf7a55b6b3eca8ca14b1de86eb6baf, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,38959,1732530662996 2024-11-25T10:31:04,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2daf7a55b6b3eca8ca14b1de86eb6baf, server=e2fd862922a0,38959,1732530662996 because future has completed 2024-11-25T10:31:04,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T10:31:04,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2daf7a55b6b3eca8ca14b1de86eb6baf, server=e2fd862922a0,38959,1732530662996 in 177 msec 2024-11-25T10:31:04,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T10:31:04,294 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=2daf7a55b6b3eca8ca14b1de86eb6baf, ASSIGN in 336 msec 2024-11-25T10:31:04,295 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T10:31:04,295 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530664295"}]},"ts":"1732530664295"} 2024-11-25T10:31:04,297 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-25T10:31:04,298 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T10:31:04,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 370 msec 2024-11-25T10:31:04,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T10:31:04,327 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-25T10:31:04,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T10:31:04,328 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-25T10:31:04,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:31:04,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-25T10:31:04,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:04,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:04,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:05,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:05,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:05,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:06,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:06,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:06,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:07,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:07,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:07,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:08,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:08,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:08,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:08,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,780 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:08,786 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,290 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:31:09,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,305 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,306 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,307 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:09,320 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T10:31:09,321 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-25T10:31:09,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:09,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:09,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:10,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:10,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:10,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:11,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:11,674 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:11,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:12,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:12,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:12,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:13,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:13,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:13,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35975 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:31:13,982 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-25T10:31:13,982 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-25T10:31:13,985 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T10:31:13,985 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:13,989 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf., hostname=e2fd862922a0,38959,1732530662996, seqNum=2] 2024-11-25T10:31:14,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:14,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:14,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:15,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:15,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:15,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:15,992 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:15,992 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:15,992 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:15,992 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:35795,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:15,993 WARN [DataStreamer for file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 block BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK], DatanodeInfoWithStorage[127.0.0.1:35795,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35795,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]) is bad. 2024-11-25T10:31:15,993 WARN [DataStreamer for file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 block BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35795,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK], DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35795,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]) is bad. 2024-11-25T10:31:15,993 WARN [PacketResponder: BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35795] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:15,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:35074 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35074 dst: /127.0.0.1:35795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:15,993 WARN [DataStreamer for file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta block BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35795,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK], DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35795,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]) is bad. 2024-11-25T10:31:15,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667331806_22 at /127.0.0.1:35034 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35034 dst: /127.0.0.1:35795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
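The DataStreamer "datanode ... is bad" warnings and the DataXceiver errors above show the HDFS client abandoning one member of the write pipeline after the datanode went away. As a rough Java sketch of the client-side settings that govern this recovery (the key names are standard HDFS client configuration; the values shown are illustrative, not necessarily what this test uses):

import org.apache.hadoop.conf.Configuration;

public class PipelineRecoveryConfigSketch {
  public static Configuration pipelineRecoveryConf() {
    Configuration conf = new Configuration();
    // Whether the client may ask the NameNode for a replacement datanode
    // when one member of the write pipeline is marked bad.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only replaces datanodes for larger pipelines; with a 2-node
    // mini cluster there is usually no spare node to substitute.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best-effort lets the write continue with fewer replicas instead of failing.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}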
2024-11-25T10:31:15,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:52242 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52242 dst: /127.0.0.1:33621 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:15,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667331806_22 at /127.0.0.1:52194 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52194 dst: /127.0.0.1:33621 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:15,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:35068 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35795:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35068 dst: /127.0.0.1:35795 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:31:15,994 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:52244 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52244 dst: /127.0.0.1:33621 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:15,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70df7796{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:15,996 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@390ee286{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:15,996 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:15,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c7a3196{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:15,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a99c341{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:15,997 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:15,997 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:31:15,997 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1372184995-172.17.0.2-1732530662222 (Datanode Uuid 3635aecd-1c36-4a07-86cd-46be7fcb50be) service to localhost/127.0.0.1:41637 2024-11-25T10:31:15,997 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:15,998 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data3/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:15,998 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data4/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:15,998 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:16,008 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:16,011 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:16,012 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:16,012 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:16,012 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:31:16,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32ef6c57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:16,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6565628b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:16,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5aaa2828{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir/jetty-localhost-42297-hadoop-hdfs-3_4_1-tests_jar-_-any-652032062134388081/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:16,126 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@50443adc{HTTP/1.1, 
(http/1.1)}{localhost:42297} 2024-11-25T10:31:16,126 INFO [Time-limited test {}] server.Server(415): Started @167639ms 2024-11-25T10:31:16,128 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:31:16,149 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:16,149 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:16,149 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:16,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667331806_22 at /127.0.0.1:36240 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36240 dst: /127.0.0.1:33621 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:16,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:36252 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36252 dst: /127.0.0.1:33621 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:16,150 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:36248 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36248 dst: /127.0.0.1:33621 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-25T10:31:16,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ff32a1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:16,155 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b09b5f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:16,155 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:16,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29ec962a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:16,155 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5485abbd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:16,156 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:16,156 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1372184995-172.17.0.2-1732530662222 (Datanode Uuid 7b2787c5-03c0-4c45-a4bd-62d731defb45) service to localhost/127.0.0.1:41637 2024-11-25T10:31:16,156 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-25T10:31:16,156 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:16,162 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data1/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:16,163 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data2/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:16,163 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:16,170 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:16,173 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:16,174 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:16,174 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:16,174 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:31:16,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f264700{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:16,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc5f7b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:16,228 WARN [Thread-1335 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:31:16,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6eabda4e08c00df7 with lease ID 0xdac11801f85f1a17: from storage DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7 node DatanodeRegistration(127.0.0.1:33335, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=42157, infoSecurePort=0, ipcPort=43391, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:31:16,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6eabda4e08c00df7 with lease ID 0xdac11801f85f1a17: from storage DS-9f9fc9ec-e253-4b30-9b94-9a569805a8e7 node DatanodeRegistration(127.0.0.1:33335, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=42157, infoSecurePort=0, ipcPort=43391, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:16,289 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12096ccf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir/jetty-localhost-46345-hadoop-hdfs-3_4_1-tests_jar-_-any-12968375017359532357/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:16,289 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@160e2053{HTTP/1.1, (http/1.1)}{localhost:46345} 2024-11-25T10:31:16,289 INFO [Time-limited test {}] server.Server(415): Started @167801ms 2024-11-25T10:31:16,290 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
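The Jetty "Stopped"/"Started" handlers and the block reports above correspond to the test bouncing the mini cluster's datanodes while the WAL writers still hold pipelines open to the old processes. A rough sketch of that kind of restart, assuming a MiniDFSCluster handle obtained from the test utility (this is not the literal TestLogRolling code):

import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DataNodeRestartSketch {
  public static void bounceDataNodes(MiniDFSCluster dfsCluster) throws Exception {
    // Restart every datanode; the WAL writer's pipeline to the old processes
    // breaks, producing the DataXceiver/DataStreamer errors logged above.
    dfsCluster.restartDataNodes();
    // Block until the restarted datanodes have re-registered and reported blocks.
    dfsCluster.waitActive();
  }
}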
2024-11-25T10:31:16,383 WARN [Thread-1366 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:31:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbec99974f860565a with lease ID 0xdac11801f85f1a18: from storage DS-59eba3af-66fd-4286-b40d-73839eeaf9a8 node DatanodeRegistration(127.0.0.1:38223, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=44041, infoSecurePort=0, ipcPort=39201, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:31:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbec99974f860565a with lease ID 0xdac11801f85f1a18: from storage DS-08943d17-775e-4a78-b7a9-2c4a0248b017 node DatanodeRegistration(127.0.0.1:38223, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=44041, infoSecurePort=0, ipcPort=39201, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:16,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:16,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:16,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:17,308 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-25T10:31:17,310 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-25T10:31:17,312 ERROR [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:17,312 WARN [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:31:17,312 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C38959%2C1732530662996:(num 1732530663383) roll requested 2024-11-25T10:31:17,312 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:17,318 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 newFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:17,318 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:17,318 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:17,318 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:17,318 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:17,318 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:17,319 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:17,320 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:17,320 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:31:17,320 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:17,320 WARN [IPC Server handler 1 on default port 41637 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-25T10:31:17,320 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 after 0ms 2024-11-25T10:31:17,324 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42157:42157),(127.0.0.1/127.0.0.1:44041:44041)] 2024-11-25T10:31:17,324 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 is not closed yet, will try archiving it next time 2024-11-25T10:31:17,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:17,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:17,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:18,230 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-25T10:31:18,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:18,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:18,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:19,328 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-25T10:31:19,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:19,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:19,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:20,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:20,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:20,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:21,321 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 after 4001ms 2024-11-25T10:31:21,330 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:38223,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:21,331 WARN [DataStreamer for file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 block BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33335,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK], DatanodeInfoWithStorage[127.0.0.1:38223,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38223,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]) is bad. 2024-11-25T10:31:21,331 WARN [PacketResponder: BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38223] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:21,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:52448 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52448 dst: /127.0.0.1:33335 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:21,331 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:50412 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50412 dst: /127.0.0.1:38223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:21,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12096ccf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:21,333 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@160e2053{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:21,333 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:21,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc5f7b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:21,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f264700{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:21,335 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:31:21,335 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:21,335 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:21,335 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1372184995-172.17.0.2-1732530662222 (Datanode Uuid 7b2787c5-03c0-4c45-a4bd-62d731defb45) service to localhost/127.0.0.1:41637 2024-11-25T10:31:21,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data1/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:21,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data2/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:21,336 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:21,343 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:21,346 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:21,347 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:21,347 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:21,347 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:31:21,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:21,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:21,464 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c5aa216{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir/jetty-localhost-42517-hadoop-hdfs-3_4_1-tests_jar-_-any-3080509787981865605/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:21,464 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46332285{HTTP/1.1, (http/1.1)}{localhost:42517} 2024-11-25T10:31:21,464 INFO [Time-limited test {}] server.Server(415): Started @172977ms 2024-11-25T10:31:21,466 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:31:21,520 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:21,521 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2087950348_22 at /127.0.0.1:55196 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:33335:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55196 dst: /127.0.0.1:33335 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:21,526 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5aaa2828{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:21,526 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@50443adc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:21,526 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:21,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6565628b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:21,527 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32ef6c57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:21,528 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:31:21,528 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:21,528 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:21,528 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1372184995-172.17.0.2-1732530662222 (Datanode Uuid 3635aecd-1c36-4a07-86cd-46be7fcb50be) service to localhost/127.0.0.1:41637 2024-11-25T10:31:21,529 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data3/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:21,529 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data4/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:21,529 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:21,546 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:21,550 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:21,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:21,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:21,551 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:31:21,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:21,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:21,590 WARN [Thread-1409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:31:21,593 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ae9c1b567e18805 with lease ID 0xdac11801f85f1a19: from storage DS-59eba3af-66fd-4286-b40d-73839eeaf9a8 node DatanodeRegistration(127.0.0.1:42815, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=42987, infoSecurePort=0, ipcPort=43209, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:31:21,593 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ae9c1b567e18805 with lease ID 0xdac11801f85f1a19: from storage DS-08943d17-775e-4a78-b7a9-2c4a0248b017 node DatanodeRegistration(127.0.0.1:42815, datanodeUuid=7b2787c5-03c0-4c45-a4bd-62d731defb45, infoPort=42987, infoSecurePort=0, ipcPort=43209, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:21,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:21,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7180ac25{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/java.io.tmpdir/jetty-localhost-39257-hadoop-hdfs-3_4_1-tests_jar-_-any-16576098113599376823/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:21,668 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:39257} 2024-11-25T10:31:21,668 INFO [Time-limited test {}] server.Server(415): Started @173180ms 2024-11-25T10:31:21,669 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:31:21,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:21,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:21,753 WARN [Thread-1440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:31:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6f044dfae7f88a6 with lease ID 0xdac11801f85f1a1a: from storage DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7 node DatanodeRegistration(127.0.0.1:41901, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=36759, infoSecurePort=0, ipcPort=40647, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd6f044dfae7f88a6 with lease ID 0xdac11801f85f1a1a: from storage DS-9f9fc9ec-e253-4b30-9b94-9a569805a8e7 node DatanodeRegistration(127.0.0.1:41901, datanodeUuid=3635aecd-1c36-4a07-86cd-46be7fcb50be, infoPort=36759, infoSecurePort=0, ipcPort=40647, storageInfo=lv=-57;cid=testClusterID;nsid=839870102;c=1732530662222), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:22,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:22,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:22,687 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-25T10:31:22,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:22,689 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-25T10:31:22,690 ERROR [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33335,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:22,690 WARN [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33335,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:22,690 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C38959%2C1732530662996:(num 1732530677312) roll requested 2024-11-25T10:31:22,691 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38959%2C1732530662996.1732530682690 2024-11-25T10:31:22,696 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 newFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 2024-11-25T10:31:22,696 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:22,696 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:22,696 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:22,696 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:22,696 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:22,696 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 2024-11-25T10:31:22,697 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33335,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:22,697 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33335,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:22,697 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:22,697 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42987:42987),(127.0.0.1/127.0.0.1:36759:36759)] 2024-11-25T10:31:22,697 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 is not closed yet, will try archiving it next time 2024-11-25T10:31:22,697 WARN [IPC Server handler 1 on default port 41637 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-25T10:31:22,697 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 after 0ms 2024-11-25T10:31:23,592 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-25T10:31:23,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:23,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:23,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:24,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:24,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:24,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:24,698 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:24,704 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 newFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:24,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:24,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:24,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:24,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:24,705 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:24,705 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:24,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741838_1019 (size=1264) 2024-11-25T10:31:24,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741838_1019 (size=1264) 2024-11-25T10:31:24,707 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 is not closed yet, will try archiving it next time 2024-11-25T10:31:24,711 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36759:36759),(127.0.0.1/127.0.0.1:42987:42987)] 2024-11-25T10:31:24,712 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 is not closed yet, will try archiving it next time 2024-11-25T10:31:24,712 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:24,712 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:24,712 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 after 0ms 2024-11-25T10:31:24,712 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:24,722 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732530664281/Put/vlen=218/seqid=0] 2024-11-25T10:31:24,722 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732530673990/Put/vlen=1045/seqid=0] 2024-11-25T10:31:24,723 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530663383 2024-11-25T10:31:24,723 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:24,723 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:24,723 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 after 0ms 2024-11-25T10:31:24,723 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:24,726 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732530677312/Put/vlen=1045/seqid=0] 2024-11-25T10:31:24,726 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732530679329/Put/vlen=1045/seqid=0] 2024-11-25T10:31:24,726 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 2024-11-25T10:31:24,726 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 2024-11-25T10:31:24,726 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 2024-11-25T10:31:24,727 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 after 1ms 2024-11-25T10:31:24,727 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530682690 2024-11-25T10:31:24,729 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732530682690/Put/vlen=1045/seqid=0] 2024-11-25T10:31:24,730 
DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:24,730 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:24,730 WARN [IPC Server handler 2 on default port 41637 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-25T10:31:24,730 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 after 0ms 2024-11-25T10:31:25,596 WARN [ResponseProcessor for block BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:25,596 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667331806_22 at /127.0.0.1:45428 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:41901:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45428 dst: /127.0.0.1:41901 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41901 remote=/127.0.0.1:45428]. Total timeout mills is 60000, 59108 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:25,596 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-667331806_22 at /127.0.0.1:51668 [Receiving block BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42815:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51668 dst: /127.0.0.1:42815 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:25,596 WARN [DataStreamer for file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 block BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41901,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK], DatanodeInfoWithStorage[127.0.0.1:42815,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41901,DS-a5e9f603-795c-47bd-a3a4-0c9e660cfff7,DISK]) is bad. 
2024-11-25T10:31:25,597 WARN [DataStreamer for file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 block BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:31:25,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741839_1022 (size=85) 2024-11-25T10:31:25,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:25,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:25,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:26,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:26,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:26,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:26,698 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530677312 after 4001ms 2024-11-25T10:31:27,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:27,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:27,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:28,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:28,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:28,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:28,731 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 after 4001ms 2024-11-25T10:31:28,731 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:28,735 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:28,735 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-25T10:31:28,735 ERROR [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:28,736 WARN [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:31:28,736 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C38959%2C1732530662996.meta:.meta(num 1732530663754) roll requested 2024-11-25T10:31:28,736 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38959%2C1732530662996.meta.1732530688736.meta 2024-11-25T10:31:28,742 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,742 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,742 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,742 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,742 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,742 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530688736.meta 2024-11-25T10:31:28,742 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:28,743 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:31:28,743 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta 2024-11-25T10:31:28,743 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36759:36759),(127.0.0.1/127.0.0.1:42987:42987)] 2024-11-25T10:31:28,743 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta is not closed yet, will try archiving it next time 2024-11-25T10:31:28,743 WARN [IPC Server handler 4 on default port 41637 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-11-25T10:31:28,743 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta after 0ms 2024-11-25T10:31:28,759 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/info/3f0a6c880f8e43d7b9c477bed17b06f4 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf./info:regioninfo/1732530664285/Put/seqid=0 2024-11-25T10:31:28,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741841_1025 (size=7125) 2024-11-25T10:31:28,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741841_1025 (size=7125) 2024-11-25T10:31:28,764 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/info/3f0a6c880f8e43d7b9c477bed17b06f4 2024-11-25T10:31:28,783 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/ns/8004da9de6a843d1beac8ee75c67027b is 43, key is default/ns:d/1732530663802/Put/seqid=0 2024-11-25T10:31:28,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741842_1026 (size=5153) 2024-11-25T10:31:28,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741842_1026 (size=5153) 2024-11-25T10:31:28,787 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/ns/8004da9de6a843d1beac8ee75c67027b 2024-11-25T10:31:28,805 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/table/723a87ea7aa4459e927bc17691f3af91 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732530664295/Put/seqid=0 2024-11-25T10:31:28,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741843_1027 (size=5438) 2024-11-25T10:31:28,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741843_1027 (size=5438) 2024-11-25T10:31:28,810 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/table/723a87ea7aa4459e927bc17691f3af91 2024-11-25T10:31:28,815 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/info/3f0a6c880f8e43d7b9c477bed17b06f4 as hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/info/3f0a6c880f8e43d7b9c477bed17b06f4 2024-11-25T10:31:28,821 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/info/3f0a6c880f8e43d7b9c477bed17b06f4, entries=10, sequenceid=11, filesize=7.0 K 2024-11-25T10:31:28,821 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/ns/8004da9de6a843d1beac8ee75c67027b as hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/ns/8004da9de6a843d1beac8ee75c67027b 2024-11-25T10:31:28,826 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/ns/8004da9de6a843d1beac8ee75c67027b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T10:31:28,827 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/.tmp/table/723a87ea7aa4459e927bc17691f3af91 as hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/table/723a87ea7aa4459e927bc17691f3af91 2024-11-25T10:31:28,831 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/table/723a87ea7aa4459e927bc17691f3af91, entries=2, sequenceid=11, filesize=5.3 K 2024-11-25T10:31:28,833 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 97ms, sequenceid=11, compaction requested=false 2024-11-25T10:31:28,833 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T10:31:28,833 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 2daf7a55b6b3eca8ca14b1de86eb6baf 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-25T10:31:28,833 ERROR [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:28,834 WARN [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79-prefix:e2fd862922a0,38959,1732530662996 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-25T10:31:28,834 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C38959%2C1732530662996:(num 1732530684698) roll requested 2024-11-25T10:31:28,834 INFO [regionserver/e2fd862922a0:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38959%2C1732530662996.1732530688834 2024-11-25T10:31:28,838 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 newFile=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530688834 2024-11-25T10:31:28,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,839 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,839 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,839 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,839 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:28,839 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530688834 2024-11-25T10:31:28,839 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:28,840 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1372184995-172.17.0.2-1732530662222:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:28,840 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42987:42987),(127.0.0.1/127.0.0.1:36759:36759)] 2024-11-25T10:31:28,840 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:28,840 DEBUG [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 is not closed yet, will try archiving it next time 2024-11-25T10:31:28,840 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 after 0ms 2024-11-25T10:31:28,841 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.1732530684698 to hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/oldWALs/e2fd862922a0%2C38959%2C1732530662996.1732530684698 2024-11-25T10:31:28,856 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf/.tmp/info/b20d8e98fdcb4ec8b581833cdba97703 is 1080, key is row1002/info:/1732530673990/Put/seqid=0 2024-11-25T10:31:28,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741845_1029 (size=9270) 2024-11-25T10:31:28,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741845_1029 (size=9270) 2024-11-25T10:31:28,862 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf/.tmp/info/b20d8e98fdcb4ec8b581833cdba97703 2024-11-25T10:31:28,868 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf/.tmp/info/b20d8e98fdcb4ec8b581833cdba97703 as hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf/info/b20d8e98fdcb4ec8b581833cdba97703 2024-11-25T10:31:28,873 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf/info/b20d8e98fdcb4ec8b581833cdba97703, entries=4, sequenceid=8, filesize=9.1 K 2024-11-25T10:31:28,874 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 2daf7a55b6b3eca8ca14b1de86eb6baf in 41ms, sequenceid=8, compaction requested=false 2024-11-25T10:31:28,874 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2daf7a55b6b3eca8ca14b1de86eb6baf: 2024-11-25T10:31:28,879 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T10:31:28,879 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T10:31:28,879 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) 
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:31:28,879 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:28,879 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:28,879 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T10:31:28,880 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T10:31:28,880 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=674484573, stopped=false 2024-11-25T10:31:28,880 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2fd862922a0,35975,1732530662946 2024-11-25T10:31:28,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:31:28,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:31:28,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:28,881 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:28,881 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:31:28,882 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T10:31:28,882 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:31:28,882 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:28,882 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:31:28,882 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:31:28,882 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,38959,1732530662996' ***** 2024-11-25T10:31:28,882 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T10:31:28,883 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(3091): Received CLOSE for 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,38959,1732530662996 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2fd862922a0:38959. 2024-11-25T10:31:28,883 DEBUG [RS:0;e2fd862922a0:38959 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:31:28,883 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2daf7a55b6b3eca8ca14b1de86eb6baf, disabling compactions & flushes 2024-11-25T10:31:28,883 DEBUG [RS:0;e2fd862922a0:38959 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:28,883 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:28,883 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T10:31:28,883 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. after waiting 0 ms 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T10:31:28,883 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:28,883 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T10:31:28,884 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T10:31:28,884 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 2daf7a55b6b3eca8ca14b1de86eb6baf=TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf.} 2024-11-25T10:31:28,884 DEBUG [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2daf7a55b6b3eca8ca14b1de86eb6baf 2024-11-25T10:31:28,884 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:31:28,884 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:31:28,884 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:31:28,884 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:31:28,884 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:31:28,889 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/default/TestLogRolling-testLogRollOnPipelineRestart/2daf7a55b6b3eca8ca14b1de86eb6baf/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-25T10:31:28,889 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:28,890 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2daf7a55b6b3eca8ca14b1de86eb6baf: Waiting for close lock at 1732530688883Running coprocessor pre-close hooks at 1732530688883Disabling compacts and flushes for region at 1732530688883Disabling writes for close at 1732530688883Writing region close event to WAL at 1732530688884 (+1 ms)Running coprocessor post-close hooks at 1732530688889 (+5 ms)Closed at 1732530688889 2024-11-25T10:31:28,890 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732530663927.2daf7a55b6b3eca8ca14b1de86eb6baf. 2024-11-25T10:31:28,892 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T10:31:28,892 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:31:28,893 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:31:28,893 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530688884Running coprocessor pre-close hooks at 1732530688884Disabling compacts and flushes for region at 1732530688884Disabling writes for close at 1732530688884Writing region close event to WAL at 1732530688889 (+5 ms)Running coprocessor post-close hooks at 1732530688892 (+3 ms)Closed at 1732530688893 (+1 ms) 2024-11-25T10:31:28,893 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T10:31:29,084 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,38959,1732530662996; all regions closed. 
2024-11-25T10:31:29,085 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:29,085 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:29,085 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:29,085 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:29,085 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:29,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741840_1023 (size=825) 2024-11-25T10:31:29,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741840_1023 (size=825) 2024-11-25T10:31:29,250 INFO [regionserver/e2fd862922a0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:31:29,324 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-25T10:31:29,324 INFO [regionserver/e2fd862922a0:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-25T10:31:29,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:29,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:29,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:30,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:30,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:30,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:31,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:31,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:31,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:32,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:32,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:32,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:32,744 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta after 4001ms 2024-11-25T10:31:32,744 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/WALs/e2fd862922a0,38959,1732530662996/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta to hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/oldWALs/e2fd862922a0%2C38959%2C1732530662996.meta.1732530663754.meta 2024-11-25T10:31:32,747 DEBUG [RS:0;e2fd862922a0:38959 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/oldWALs 2024-11-25T10:31:32,747 INFO [RS:0;e2fd862922a0:38959 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C38959%2C1732530662996.meta:.meta(num 1732530688736) 2024-11-25T10:31:32,747 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,747 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,748 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,748 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,748 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741844_1028 (size=1162) 2024-11-25T10:31:32,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741844_1028 (size=1162) 2024-11-25T10:31:32,754 DEBUG [RS:0;e2fd862922a0:38959 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/oldWALs 2024-11-25T10:31:32,754 INFO [RS:0;e2fd862922a0:38959 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C38959%2C1732530662996:(num 1732530688834) 2024-11-25T10:31:32,754 DEBUG [RS:0;e2fd862922a0:38959 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:32,754 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:31:32,754 INFO [RS:0;e2fd862922a0:38959 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:31:32,754 INFO [RS:0;e2fd862922a0:38959 {}] hbase.ChoreService(370): Chore service for: regionserver/e2fd862922a0:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T10:31:32,754 INFO [RS:0;e2fd862922a0:38959 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:31:32,754 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:31:32,754 INFO [RS:0;e2fd862922a0:38959 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38959 2024-11-25T10:31:32,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:31:32,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,38959,1732530662996 2024-11-25T10:31:32,757 INFO [RS:0;e2fd862922a0:38959 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:31:32,757 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-25T10:31:32,758 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,38959,1732530662996] 2024-11-25T10:31:32,761 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,38959,1732530662996 already deleted, retry=false 2024-11-25T10:31:32,761 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,38959,1732530662996 expired; onlineServers=0 2024-11-25T10:31:32,761 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2fd862922a0,35975,1732530662946' ***** 2024-11-25T10:31:32,761 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T10:31:32,761 INFO [M:0;e2fd862922a0:35975 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:31:32,761 INFO [M:0;e2fd862922a0:35975 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:31:32,761 DEBUG [M:0;e2fd862922a0:35975 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T10:31:32,761 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-25T10:31:32,761 DEBUG [M:0;e2fd862922a0:35975 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T10:31:32,761 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530663148 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530663148,5,FailOnTimeoutGroup] 2024-11-25T10:31:32,761 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530663150 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530663150,5,FailOnTimeoutGroup] 2024-11-25T10:31:32,761 INFO [M:0;e2fd862922a0:35975 {}] hbase.ChoreService(370): Chore service for: master/e2fd862922a0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T10:31:32,761 INFO [M:0;e2fd862922a0:35975 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:31:32,762 DEBUG [M:0;e2fd862922a0:35975 {}] master.HMaster(1795): Stopping service threads 2024-11-25T10:31:32,762 INFO [M:0;e2fd862922a0:35975 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T10:31:32,762 INFO [M:0;e2fd862922a0:35975 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:31:32,762 INFO [M:0;e2fd862922a0:35975 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T10:31:32,762 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-25T10:31:32,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T10:31:32,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:32,763 DEBUG [M:0;e2fd862922a0:35975 {}] zookeeper.ZKUtil(347): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T10:31:32,763 WARN [M:0;e2fd862922a0:35975 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T10:31:32,763 INFO [M:0;e2fd862922a0:35975 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/.lastflushedseqids 2024-11-25T10:31:32,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741846_1030 (size=139) 2024-11-25T10:31:32,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741846_1030 (size=139) 2024-11-25T10:31:32,769 INFO [M:0;e2fd862922a0:35975 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T10:31:32,769 INFO [M:0;e2fd862922a0:35975 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T10:31:32,769 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:31:32,769 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:32,769 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:32,769 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:31:32,769 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:32,769 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-25T10:31:32,770 ERROR [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData-prefix:e2fd862922a0,35975,1732530662946 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:32,770 WARN [FSHLog-0-hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData-prefix:e2fd862922a0,35975,1732530662946 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:32,770 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e2fd862922a0%2C35975%2C1732530662946:(num 1732530663078) roll requested 2024-11-25T10:31:32,770 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C35975%2C1732530662946.1732530692770 2024-11-25T10:31:32,774 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,774 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,774 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,775 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,775 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530692770 2024-11-25T10:31:32,775 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:32,775 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33621,DS-59eba3af-66fd-4286-b40d-73839eeaf9a8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-25T10:31:32,775 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 2024-11-25T10:31:32,776 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36759:36759),(127.0.0.1/127.0.0.1:42987:42987)] 2024-11-25T10:31:32,776 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 is not closed yet, will try archiving it next time 2024-11-25T10:31:32,776 WARN [IPC Server handler 0 on default port 41637 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1013 2024-11-25T10:31:32,776 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 after 1ms 2024-11-25T10:31:32,791 DEBUG [M:0;e2fd862922a0:35975 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a05f90385f1f4404a705bebf760542b0 is 82, key is hbase:meta,,1/info:regioninfo/1732530663787/Put/seqid=0 2024-11-25T10:31:32,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741848_1033 (size=5672) 2024-11-25T10:31:32,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741848_1033 (size=5672) 2024-11-25T10:31:32,796 INFO [M:0;e2fd862922a0:35975 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a05f90385f1f4404a705bebf760542b0 2024-11-25T10:31:32,814 DEBUG [M:0;e2fd862922a0:35975 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7dc857ea471b40eebc391747f9ef0b36 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732530664299/Put/seqid=0 2024-11-25T10:31:32,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741849_1034 (size=6119) 2024-11-25T10:31:32,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741849_1034 (size=6119) 2024-11-25T10:31:32,819 INFO [M:0;e2fd862922a0:35975 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7dc857ea471b40eebc391747f9ef0b36 2024-11-25T10:31:32,838 DEBUG [M:0;e2fd862922a0:35975 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8ea6d966d00340fbacbc3eabdc706cf6 is 69, key is e2fd862922a0,38959,1732530662996/rs:state/1732530663231/Put/seqid=0 2024-11-25T10:31:32,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741850_1035 (size=5156) 2024-11-25T10:31:32,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741850_1035 (size=5156) 2024-11-25T10:31:32,843 INFO [M:0;e2fd862922a0:35975 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8ea6d966d00340fbacbc3eabdc706cf6 2024-11-25T10:31:32,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:32,858 INFO [RS:0;e2fd862922a0:38959 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:31:32,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38959-0x1014eadd89d0001, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:32,858 INFO [RS:0;e2fd862922a0:38959 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,38959,1732530662996; zookeeper connection closed. 2024-11-25T10:31:32,859 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@82c3460 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@82c3460 2024-11-25T10:31:32,859 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T10:31:32,862 DEBUG [M:0;e2fd862922a0:35975 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/828fd45a7d374bf88eed1310d0ab7bf6 is 52, key is load_balancer_on/state:d/1732530663923/Put/seqid=0 2024-11-25T10:31:32,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741851_1036 (size=5056) 2024-11-25T10:31:32,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741851_1036 (size=5056) 2024-11-25T10:31:32,867 INFO [M:0;e2fd862922a0:35975 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/828fd45a7d374bf88eed1310d0ab7bf6 2024-11-25T10:31:32,873 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a05f90385f1f4404a705bebf760542b0 as hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a05f90385f1f4404a705bebf760542b0 2024-11-25T10:31:32,878 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a05f90385f1f4404a705bebf760542b0, entries=8, sequenceid=56, filesize=5.5 K 2024-11-25T10:31:32,879 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7dc857ea471b40eebc391747f9ef0b36 as 
hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7dc857ea471b40eebc391747f9ef0b36 2024-11-25T10:31:32,884 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7dc857ea471b40eebc391747f9ef0b36, entries=6, sequenceid=56, filesize=6.0 K 2024-11-25T10:31:32,884 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8ea6d966d00340fbacbc3eabdc706cf6 as hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8ea6d966d00340fbacbc3eabdc706cf6 2024-11-25T10:31:32,889 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8ea6d966d00340fbacbc3eabdc706cf6, entries=1, sequenceid=56, filesize=5.0 K 2024-11-25T10:31:32,890 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/828fd45a7d374bf88eed1310d0ab7bf6 as hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/828fd45a7d374bf88eed1310d0ab7bf6 2024-11-25T10:31:32,894 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/828fd45a7d374bf88eed1310d0ab7bf6, entries=1, sequenceid=56, filesize=4.9 K 2024-11-25T10:31:32,896 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false 2024-11-25T10:31:32,897 INFO [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:32,897 DEBUG [M:0;e2fd862922a0:35975 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530692769Disabling compacts and flushes for region at 1732530692769Disabling writes for close at 1732530692769Obtaining lock to block concurrent updates at 1732530692769Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732530692769Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732530692770 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732530692776 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732530692776Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732530692790 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732530692791 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732530692800 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732530692813 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732530692813Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732530692824 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732530692837 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732530692837Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732530692848 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732530692861 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732530692861Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76ee6f11: reopening flushed file at 1732530692873 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ccd7a0d: reopening flushed file at 1732530692878 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6840576f: reopening flushed file at 1732530692884 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@544ab8db: reopening flushed file at 1732530692889 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false at 1732530692896 (+7 ms)Writing region close event to WAL at 1732530692897 (+1 ms)Closed at 1732530692897 2024-11-25T10:31:32,898 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,898 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,898 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,898 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,898 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:31:32,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42815 is added to blk_1073741847_1031 (size=757) 2024-11-25T10:31:32,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41901 is added to blk_1073741847_1031 (size=757) 2024-11-25T10:31:32,923 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T10:31:33,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:33,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:33,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:33,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:33,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,917 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,919 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:33,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,424 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:31:34,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,426 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,446 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:34,450 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:31:34,450 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-25T10:31:34,450 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-25T10:31:34,450 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-25T10:31:34,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:34,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:34,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:35,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:35,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:35,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:35,757 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-25T10:31:36,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:36,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:36,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:36,776 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 after 4001ms 2024-11-25T10:31:36,777 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/WALs/e2fd862922a0,35975,1732530662946/e2fd862922a0%2C35975%2C1732530662946.1732530663078 to hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/oldWALs/e2fd862922a0%2C35975%2C1732530662946.1732530663078 2024-11-25T10:31:36,780 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/MasterData/oldWALs/e2fd862922a0%2C35975%2C1732530662946.1732530663078 to hdfs://localhost:41637/user/jenkins/test-data/512af37d-1b34-9ad7-67a0-da58805bae79/oldWALs/e2fd862922a0%2C35975%2C1732530662946.1732530663078$masterlocalwal$ 2024-11-25T10:31:36,780 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:31:36,780 INFO [M:0;e2fd862922a0:35975 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
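The repeated RecoverLeaseFSUtils warnings above come from polling whether the old WAL file has been closed while the wrapping HDFS client has already been shut down, so each reflective isFileClosed call fails with "Filesystem closed" until the lease is finally reported recovered ("Recovered lease, attempt=1 ... after 4001ms"). The following is a minimal sketch of that recover-then-poll pattern, assuming direct access to DistributedFileSystem rather than the reflective call RecoverLeaseFSUtils uses; the timeout and pause values are illustrative, not HBase defaults.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {

  // Ask the NameNode to start lease recovery on an old WAL file, then poll until
  // the file is reported closed. Values for timeoutMs/pauseMs are illustrative.
  public static boolean recoverWalLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.recoverLease(wal)) {
          return true; // lease recovered and file closed
        }
        if (dfs.isFileClosed(wal)) {
          return true; // already closed by another actor
        }
      } catch (IOException e) {
        // "Filesystem closed" from DFSClient.checkOpen() lands here when the client
        // was shut down first, which is what the warnings above show; the caller
        // simply keeps retrying until the deadline or until recovery succeeds.
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}

The log shows exactly this behaviour: the polling attempts fail while the DFS client is closed, and the lease is eventually recovered on a later attempt before the WAL is archived to oldWALs.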
2024-11-25T10:31:36,781 INFO [M:0;e2fd862922a0:35975 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35975 2024-11-25T10:31:36,781 INFO [M:0;e2fd862922a0:35975 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:31:36,882 INFO [M:0;e2fd862922a0:35975 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:31:36,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:36,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35975-0x1014eadd89d0000, quorum=127.0.0.1:56140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:31:36,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7180ac25{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:36,885 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@248d0d6a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:36,885 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:36,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ede944f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:36,885 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75ed142f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:36,887 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:36,887 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
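The "type=None, state=Closed, path=null" lines above are ZKWatcher logging the session-level event that the ZooKeeper client delivers while the master closes its handle during shutdown. A small sketch of a watcher that surfaces such events is shown below; it is not the HBase ZKWatcher code, the quorum address is illustrative, and the Closed state assumes a recent (3.5+) ZooKeeper client.

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class SessionClosedEventSketch {

  public static void main(String[] args) throws Exception {
    // Session-level events carry type None and a null path; on close, recent
    // ZooKeeper clients report state Closed, matching the ZKWatcher lines above.
    Watcher watcher = event -> System.out.println("Received ZooKeeper Event, type="
        + event.getType() + ", state=" + event.getState() + ", path=" + event.getPath());

    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher); // address is illustrative
    zk.close(); // delivers the final session event seen in the log
  }
}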
2024-11-25T10:31:36,887 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1372184995-172.17.0.2-1732530662222 (Datanode Uuid 3635aecd-1c36-4a07-86cd-46be7fcb50be) service to localhost/127.0.0.1:41637 2024-11-25T10:31:36,887 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:36,887 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data3/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:36,888 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data4/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:36,888 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:36,892 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c5aa216{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:36,893 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46332285{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:36,893 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:36,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:36,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2460467f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:36,894 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:31:36,894 WARN [BP-1372184995-172.17.0.2-1732530662222 heartbeating to localhost/127.0.0.1:41637 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1372184995-172.17.0.2-1732530662222 (Datanode Uuid 7b2787c5-03c0-4c45-a4bd-62d731defb45) service to localhost/127.0.0.1:41637 2024-11-25T10:31:36,894 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
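The "Thread Interrupted waiting to refresh disk information: sleep interrupted" warnings come from the per-volume disk-usage refresh threads being interrupted mid-sleep as the mini DFS cluster tears down its datanodes. The sketch below shows the generic refresh-loop pattern behind such threads; it is not Hadoop's CachingGetSpaceUsed implementation, and the directory and interval are placeholders.

import java.io.File;
import java.util.concurrent.atomic.AtomicLong;

public class RefreshUsedSketch implements Runnable {

  private final File blockPoolDir;
  private final long refreshIntervalMs;
  private final AtomicLong used = new AtomicLong();

  public RefreshUsedSketch(File blockPoolDir, long refreshIntervalMs) {
    this.blockPoolDir = blockPoolDir;
    this.refreshIntervalMs = refreshIntervalMs;
  }

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Thread.sleep(refreshIntervalMs);    // most of the thread's life is spent sleeping
        used.set(diskUsage(blockPoolDir));  // recompute "du" for the volume
      } catch (InterruptedException e) {
        // On shutdown the sleeping thread is interrupted; a real implementation logs a
        // warning like the ones above and exits.
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  private static long diskUsage(File f) {
    if (f.isFile()) {
      return f.length();
    }
    long sum = 0;
    File[] children = f.listFiles();
    if (children != null) {
      for (File child : children) {
        sum += diskUsage(child);
      }
    }
    return sum;
  }

  public long getUsed() {
    return used.get();
  }
}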
2024-11-25T10:31:36,894 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:31:36,894 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data1/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:36,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/cluster_00de9ea0-6700-e236-feca-99abdf5f3283/data/data2/current/BP-1372184995-172.17.0.2-1732530662222 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:31:36,895 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:31:36,900 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d60493b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:31:36,900 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14bd1a08{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:31:36,901 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:31:36,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c727387{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:31:36,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48d7ddcf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir/,STOPPED} 2024-11-25T10:31:36,907 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T10:31:36,923 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T10:31:36,932 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=180 (was 155) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41637 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41637 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41637 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41637 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41637 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41637 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41637 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41637 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 440) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=71 (was 102), ProcessCount=11 (was 11), AvailableMemoryMB=1578 (was 1721) 2024-11-25T10:31:36,939 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=180, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=71, ProcessCount=11, AvailableMemoryMB=1579 2024-11-25T10:31:36,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T10:31:36,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.log.dir so I do NOT create it in target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492 2024-11-25T10:31:36,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b41dac5d-05da-4b33-f2a4-944579b9cb96/hadoop.tmp.dir so I do NOT create it in target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492 2024-11-25T10:31:36,939 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133, deleteOnExit=true 2024-11-25T10:31:36,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/test.cache.data in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T10:31:36,940 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system 
is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T10:31:36,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/nfs.dump.dir in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/java.io.tmpdir in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T10:31:36,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T10:31:36,954 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:31:37,021 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:37,025 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:37,033 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:37,033 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:37,033 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:31:37,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:37,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bd654dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:37,037 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@637fc016{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:37,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6955024e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/java.io.tmpdir/jetty-localhost-33513-hadoop-hdfs-3_4_1-tests_jar-_-any-4307795820688068267/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:31:37,150 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b7f365e{HTTP/1.1, (http/1.1)}{localhost:33513} 2024-11-25T10:31:37,150 INFO [Time-limited test {}] server.Server(415): Started @188663ms 2024-11-25T10:31:37,163 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:31:37,214 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:37,217 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:37,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:37,218 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:37,218 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:31:37,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7934466e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:37,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1850586f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:37,332 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e7fac47{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/java.io.tmpdir/jetty-localhost-44985-hadoop-hdfs-3_4_1-tests_jar-_-any-11664165122937675489/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:37,332 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a3c93e{HTTP/1.1, (http/1.1)}{localhost:44985} 2024-11-25T10:31:37,332 INFO [Time-limited test {}] server.Server(415): Started @188845ms 2024-11-25T10:31:37,333 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:31:37,363 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:31:37,365 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:31:37,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:31:37,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:31:37,366 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:31:37,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16fb9c03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:31:37,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@231eb320{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:31:37,443 WARN [Thread-1635 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data2/current/BP-1489323801-172.17.0.2-1732530696970/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:37,443 WARN [Thread-1634 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data1/current/BP-1489323801-172.17.0.2-1732530696970/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:37,465 WARN [Thread-1613 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:31:37,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf34973396e23271 with lease ID 0xa61638b4a36775c6: Processing first storage report for DS-e936be76-8a78-4f99-8679-3caa81d7dae1 from datanode DatanodeRegistration(127.0.0.1:35331, datanodeUuid=986b6cbd-05f0-4330-8bd0-63df6f447ead, infoPort=33441, infoSecurePort=0, ipcPort=34491, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970) 2024-11-25T10:31:37,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf34973396e23271 with lease ID 0xa61638b4a36775c6: from storage DS-e936be76-8a78-4f99-8679-3caa81d7dae1 node DatanodeRegistration(127.0.0.1:35331, datanodeUuid=986b6cbd-05f0-4330-8bd0-63df6f447ead, infoPort=33441, infoSecurePort=0, ipcPort=34491, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:37,468 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcf34973396e23271 with lease ID 0xa61638b4a36775c6: Processing first storage report for DS-7be63cc0-3330-4d6f-9878-3ba97f004985 from datanode DatanodeRegistration(127.0.0.1:35331, datanodeUuid=986b6cbd-05f0-4330-8bd0-63df6f447ead, infoPort=33441, infoSecurePort=0, ipcPort=34491, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970) 2024-11-25T10:31:37,468 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcf34973396e23271 with lease ID 0xa61638b4a36775c6: from storage DS-7be63cc0-3330-4d6f-9878-3ba97f004985 node DatanodeRegistration(127.0.0.1:35331, datanodeUuid=986b6cbd-05f0-4330-8bd0-63df6f447ead, infoPort=33441, infoSecurePort=0, ipcPort=34491, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:37,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1b5fa12b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/java.io.tmpdir/jetty-localhost-37915-hadoop-hdfs-3_4_1-tests_jar-_-any-18370362256042159303/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:31:37,484 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ecaedd6{HTTP/1.1, (http/1.1)}{localhost:37915} 2024-11-25T10:31:37,484 INFO [Time-limited test {}] server.Server(415): Started @188996ms 2024-11-25T10:31:37,485 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
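
[editor's note] The repeated AuthenticationFilter warnings above are caused by the HTTP signer secret file on the build host being unreadable, so the filter falls back to a random per-process secret. Below is a minimal, hedged sketch of how a test setup could supply that secret through Hadoop configuration instead; the hadoop.http.authentication.* key names are assumed from Hadoop's HTTP authentication settings, and the temp file stands in for the Jenkins path shown in the log.

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
  public static void main(String[] args) throws Exception {
    // A readable secret file; the /home/jenkins path in the log is specific to
    // that environment, so a temp file is used here purely for illustration.
    Path secret = Files.createTempFile("hadoop-http-auth-signature-secret", "");
    Files.write(secret, "not-a-real-secret".getBytes(StandardCharsets.UTF_8));

    Configuration conf = new Configuration();
    // Assumed key names: when the secret file key is unset or unreadable,
    // AuthenticationFilter logs the warning seen above and falls back to
    // random secrets, which is harmless for tests but noisy in the log.
    conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
    conf.set("hadoop.http.authentication.type", "simple");
  }
}
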
2024-11-25T10:31:37,588 WARN [Thread-1660 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data3/current/BP-1489323801-172.17.0.2-1732530696970/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:37,588 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data4/current/BP-1489323801-172.17.0.2-1732530696970/current, will proceed with Du for space computation calculation, 2024-11-25T10:31:37,604 WARN [Thread-1649 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:31:37,605 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc74dba2d3e1f7bb2 with lease ID 0xa61638b4a36775c7: Processing first storage report for DS-69c46219-60d9-4778-919d-2da752cff2aa from datanode DatanodeRegistration(127.0.0.1:37645, datanodeUuid=9100fa0a-b470-4a41-9fb3-5145d4fd8b82, infoPort=43287, infoSecurePort=0, ipcPort=33155, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970) 2024-11-25T10:31:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc74dba2d3e1f7bb2 with lease ID 0xa61638b4a36775c7: from storage DS-69c46219-60d9-4778-919d-2da752cff2aa node DatanodeRegistration(127.0.0.1:37645, datanodeUuid=9100fa0a-b470-4a41-9fb3-5145d4fd8b82, infoPort=43287, infoSecurePort=0, ipcPort=33155, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc74dba2d3e1f7bb2 with lease ID 0xa61638b4a36775c7: Processing first storage report for DS-a759276f-20c1-4e1a-ae69-016095e57ad3 from datanode DatanodeRegistration(127.0.0.1:37645, datanodeUuid=9100fa0a-b470-4a41-9fb3-5145d4fd8b82, infoPort=43287, infoSecurePort=0, ipcPort=33155, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970) 2024-11-25T10:31:37,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc74dba2d3e1f7bb2 with lease ID 0xa61638b4a36775c7: from storage DS-a759276f-20c1-4e1a-ae69-016095e57ad3 node DatanodeRegistration(127.0.0.1:37645, datanodeUuid=9100fa0a-b470-4a41-9fb3-5145d4fd8b82, infoPort=43287, infoSecurePort=0, ipcPort=33155, storageInfo=lv=-57;cid=testClusterID;nsid=892498369;c=1732530696970), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:31:37,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:37,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:37,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:37,707 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492 2024-11-25T10:31:37,709 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/zookeeper_0, clientPort=65080, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T10:31:37,710 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65080 2024-11-25T10:31:37,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:37,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:37,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:31:37,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:31:37,720 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356 with version=8 2024-11-25T10:31:37,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase-staging 2024-11-25T10:31:37,722 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:31:37,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:37,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:37,722 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:31:37,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:37,722 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:31:37,723 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T10:31:37,723 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:31:37,723 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44353 2024-11-25T10:31:37,724 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44353 connecting to ZooKeeper ensemble=127.0.0.1:65080 2024-11-25T10:31:37,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443530x0, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:31:37,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44353-0x1014eae607c0000 connected 2024-11-25T10:31:37,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:37,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:37,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:31:37,746 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356, hbase.cluster.distributed=false 2024-11-25T10:31:37,748 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:31:37,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44353 2024-11-25T10:31:37,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44353 2024-11-25T10:31:37,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44353 2024-11-25T10:31:37,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44353 2024-11-25T10:31:37,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44353 2024-11-25T10:31:37,764 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:31:37,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:37,764 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:37,764 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:31:37,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:31:37,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:31:37,764 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:31:37,764 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:31:37,765 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38131 2024-11-25T10:31:37,766 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38131 connecting to ZooKeeper ensemble=127.0.0.1:65080 2024-11-25T10:31:37,766 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:37,768 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:37,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381310x0, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:31:37,772 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:381310x0, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:31:37,772 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38131-0x1014eae607c0001 connected 2024-11-25T10:31:37,772 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:31:37,772 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:31:37,773 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T10:31:37,774 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:31:37,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38131 2024-11-25T10:31:37,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, 
numCallQueues=1, port=38131 2024-11-25T10:31:37,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38131 2024-11-25T10:31:37,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38131 2024-11-25T10:31:37,775 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38131 2024-11-25T10:31:37,786 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2fd862922a0:44353 2024-11-25T10:31:37,786 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2fd862922a0,44353,1732530697722 2024-11-25T10:31:37,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:37,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:37,788 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2fd862922a0,44353,1732530697722 2024-11-25T10:31:37,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T10:31:37,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,791 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:31:37,791 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2fd862922a0,44353,1732530697722 from backup master directory 2024-11-25T10:31:37,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2fd862922a0,44353,1732530697722 2024-11-25T10:31:37,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:37,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:31:37,792 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:31:37,792 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2fd862922a0,44353,1732530697722 2024-11-25T10:31:37,796 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/hbase.id] with ID: f9fde289-8395-4380-9817-c7f24322c925 2024-11-25T10:31:37,796 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/.tmp/hbase.id 2024-11-25T10:31:37,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:31:37,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:31:37,805 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/.tmp/hbase.id]:[hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/hbase.id] 2024-11-25T10:31:37,816 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:37,816 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T10:31:37,817 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
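
[editor's note] The entries from the MiniZooKeeperCluster start through the master/regionserver RPC setup are the standard single-process mini-cluster bootstrap. A rough sketch of driving that bootstrap from test code is shown below; it assumes the HBaseTestingUtil class named in the log exposes the same startMiniCluster/shutdownMiniCluster/getConfiguration methods as the older HBaseTestingUtility, so treat the exact method names as assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // Small handler pools, mirroring the handlerCount=3 RpcExecutor entries above.
    conf.setInt("hbase.regionserver.handler.count", 3);
    try {
      // Starts an embedded ZooKeeper, a mini HDFS, one HMaster and one region
      // server, producing startup output of the same shape as this log.
      util.startMiniCluster();
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
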
2024-11-25T10:31:37,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:31:37,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:31:37,828 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:31:37,828 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T10:31:37,829 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:31:37,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:31:37,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:31:37,835 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store 2024-11-25T10:31:37,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:31:37,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:31:37,842 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:37,842 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:31:37,842 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:37,842 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:37,842 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:31:37,842 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:31:37,842 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
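
[editor's note] The master builds the 'master:store' descriptor internally, but the family attributes it prints above map directly onto HBase's public descriptor builders. The sketch below only illustrates that mapping for the 'info' family; the 'proc', 'rs' and 'state' families are created with builder defaults, which should already match the 1-version, ROW-bloom, 64 KB-block settings shown in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor store = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        // 'info' as printed above: 3 versions, ROWCOL bloom filter,
        // ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // Remaining families with library defaults.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
    System.out.println(store);
  }
}
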
2024-11-25T10:31:37,842 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530697842Disabling compacts and flushes for region at 1732530697842Disabling writes for close at 1732530697842Writing region close event to WAL at 1732530697842Closed at 1732530697842 2024-11-25T10:31:37,843 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/.initializing 2024-11-25T10:31:37,843 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/WALs/e2fd862922a0,44353,1732530697722 2024-11-25T10:31:37,845 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C44353%2C1732530697722, suffix=, logDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/WALs/e2fd862922a0,44353,1732530697722, archiveDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/oldWALs, maxLogs=10 2024-11-25T10:31:37,845 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C44353%2C1732530697722.1732530697845 2024-11-25T10:31:37,850 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/WALs/e2fd862922a0,44353,1732530697722/e2fd862922a0%2C44353%2C1732530697722.1732530697845 2024-11-25T10:31:37,852 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43287:43287),(127.0.0.1/127.0.0.1:33441:33441)] 2024-11-25T10:31:37,853 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:31:37,853 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:37,853 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,853 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T10:31:37,855 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:37,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T10:31:37,857 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:37,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,858 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T10:31:37,858 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:37,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T10:31:37,860 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:37,861 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,861 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,862 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,863 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,863 DEBUG [master/e2fd862922a0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,864 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T10:31:37,865 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:31:37,866 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:31:37,867 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756000, jitterRate=-0.03869730234146118}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T10:31:37,867 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732530697853Initializing all the Stores at 1732530697854 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530697854Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530697854Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530697854Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530697854Cleaning up temporary data from old regions at 1732530697863 (+9 ms)Region opened successfully at 1732530697867 (+4 ms) 2024-11-25T10:31:37,868 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T10:31:37,870 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6277e474, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:31:37,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T10:31:37,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T10:31:37,872 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T10:31:37,872 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T10:31:37,872 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T10:31:37,872 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T10:31:37,872 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T10:31:37,874 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T10:31:37,875 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T10:31:37,876 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T10:31:37,876 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T10:31:37,877 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T10:31:37,879 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T10:31:37,879 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T10:31:37,880 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T10:31:37,881 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T10:31:37,881 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T10:31:37,882 DEBUG 
[master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T10:31:37,884 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T10:31:37,885 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T10:31:37,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:31:37,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:31:37,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,888 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2fd862922a0,44353,1732530697722, sessionid=0x1014eae607c0000, setting cluster-up flag (Was=false) 2024-11-25T10:31:37,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,896 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T10:31:37,897 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,44353,1732530697722 2024-11-25T10:31:37,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:37,904 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T10:31:37,905 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,44353,1732530697722 2024-11-25T10:31:37,906 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T10:31:37,907 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:37,908 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T10:31:37,908 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T10:31:37,908 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2fd862922a0,44353,1732530697722 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2fd862922a0:0, corePoolSize=10, maxPoolSize=10 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:31:37,909 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T10:31:37,910 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732530727910 2024-11-25T10:31:37,910 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T10:31:37,910 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T10:31:37,910 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T10:31:37,910 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T10:31:37,910 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T10:31:37,910 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T10:31:37,911 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:37,911 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:37,911 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T10:31:37,911 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T10:31:37,911 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T10:31:37,911 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T10:31:37,911 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T10:31:37,911 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T10:31:37,912 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530697911,5,FailOnTimeoutGroup] 2024-11-25T10:31:37,912 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530697912,5,FailOnTimeoutGroup] 2024-11-25T10:31:37,912 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:37,912 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T10:31:37,912 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:37,912 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:37,912 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,912 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T10:31:37,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:31:37,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:31:37,920 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T10:31:37,920 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356 2024-11-25T10:31:37,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:31:37,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:31:37,926 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:37,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:31:37,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:31:37,928 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:37,928 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:31:37,929 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:31:37,929 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:37,930 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:31:37,931 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:31:37,931 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:37,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:31:37,932 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:31:37,932 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:37,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:37,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:31:37,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740 2024-11-25T10:31:37,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740 2024-11-25T10:31:37,934 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:31:37,934 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:31:37,935 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
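(Illustrative aside, not part of the captured log.) The FlushLargeStoresPolicy entry above falls back to region.getMemStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor. Below is a minimal sketch of how that per-table property could be supplied through the public TableDescriptorBuilder API; only the property name comes from the log, while the table name "demo", family "cf" and the 16 MB value are illustrative assumptions.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundExample {
  public static void main(String[] args) {
    // Build a table descriptor that carries the per-column-family flush lower bound
    // the log message above says is missing for hbase:meta.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        // Property name copied verbatim from the log; 16 MB value is an assumption.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}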
2024-11-25T10:31:37,936 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:31:37,937 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:31:37,937 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702441, jitterRate=-0.10680115222930908}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:31:37,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732530697926Initializing all the Stores at 1732530697926Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530697926Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530697926Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530697927 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530697927Cleaning up temporary data from old regions at 1732530697934 (+7 ms)Region opened successfully at 1732530697938 (+4 ms) 2024-11-25T10:31:37,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:31:37,938 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:31:37,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:31:37,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:31:37,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:31:37,939 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:31:37,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530697938Disabling compacts and flushes for region at 1732530697938Disabling writes for close at 1732530697938Writing region close 
event to WAL at 1732530697939 (+1 ms)Closed at 1732530697939 2024-11-25T10:31:37,940 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:37,940 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T10:31:37,940 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T10:31:37,941 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:31:37,942 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T10:31:37,976 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(746): ClusterId : f9fde289-8395-4380-9817-c7f24322c925 2024-11-25T10:31:37,977 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:31:37,978 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:31:37,979 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:31:37,981 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:31:37,982 DEBUG [RS:0;e2fd862922a0:38131 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@452b5020, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:31:37,993 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2fd862922a0:38131 2024-11-25T10:31:37,993 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:31:37,993 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:31:37,993 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T10:31:37,994 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,44353,1732530697722 with port=38131, startcode=1732530697764 2024-11-25T10:31:37,994 DEBUG [RS:0;e2fd862922a0:38131 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:31:37,996 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51939, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:31:37,996 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44353 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,38131,1732530697764 2024-11-25T10:31:37,997 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44353 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,38131,1732530697764 2024-11-25T10:31:37,998 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356 2024-11-25T10:31:37,998 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41211 2024-11-25T10:31:37,998 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:31:38,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:31:38,000 DEBUG [RS:0;e2fd862922a0:38131 {}] zookeeper.ZKUtil(111): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,000 WARN [RS:0;e2fd862922a0:38131 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:31:38,000 INFO [RS:0;e2fd862922a0:38131 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:31:38,000 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,001 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,38131,1732530697764] 2024-11-25T10:31:38,003 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:31:38,005 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:31:38,005 INFO [RS:0;e2fd862922a0:38131 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:31:38,005 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
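(Illustrative aside, not part of the captured log.) The lines above show the region server registering and the master's RegionServerTracker watching its ephemeral znode under /hbase/rs. A minimal sketch of inspecting that same path with the plain ZooKeeper client follows, assuming the quorum address 127.0.0.1:65080 and the default /hbase base znode seen in the log; HBase itself goes through ZKUtil/RecoverableZooKeeper rather than this raw client, and the class name is invented for the example.

import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServers {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum address taken from the log above; 30s session timeout is an assumption.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:65080", 30000, event -> {
      // Wait for the session to reach SyncConnected before issuing reads.
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // /hbase/rs holds one ephemeral child per live region server.
    List<String> servers = zk.getChildren("/hbase/rs", false);
    servers.forEach(s -> System.out.println("live region server znode: " + s));
    zk.close();
  }
}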
2024-11-25T10:31:38,012 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:31:38,012 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:31:38,013 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:31:38,013 DEBUG [RS:0;e2fd862922a0:38131 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:31:38,013 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
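(Illustrative aside, not part of the captured log.) The ExecutorService lines above report each pool's corePoolSize and maxPoolSize. The sketch below shows the same sizing idea with the plain JDK ThreadPoolExecutor; it is a stand-in for the concept only, not HBase's internal executor.ExecutorService implementation.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolSizing {
  public static void main(String[] args) {
    // Mirrors the shape of RS_OPEN_REGION above: corePoolSize=1, maxPoolSize=1,
    // with an unbounded queue for pending tasks.
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.submit(() -> System.out.println("open-region task"));
    pool.shutdown();
  }
}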
2024-11-25T10:31:38,014 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,014 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,014 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,014 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,014 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,38131,1732530697764-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:31:38,029 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:31:38,030 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,38131,1732530697764-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,030 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,030 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.Replication(171): e2fd862922a0,38131,1732530697764 started 2024-11-25T10:31:38,043 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,043 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,38131,1732530697764, RpcServer on e2fd862922a0/172.17.0.2:38131, sessionid=0x1014eae607c0001 2024-11-25T10:31:38,044 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:31:38,044 DEBUG [RS:0;e2fd862922a0:38131 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,044 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,38131,1732530697764' 2024-11-25T10:31:38,044 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:31:38,044 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:31:38,045 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:31:38,045 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:31:38,045 DEBUG [RS:0;e2fd862922a0:38131 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,045 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,38131,1732530697764' 2024-11-25T10:31:38,045 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:31:38,045 DEBUG 
[RS:0;e2fd862922a0:38131 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:31:38,045 DEBUG [RS:0;e2fd862922a0:38131 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:31:38,045 INFO [RS:0;e2fd862922a0:38131 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:31:38,045 INFO [RS:0;e2fd862922a0:38131 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T10:31:38,093 WARN [e2fd862922a0:44353 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T10:31:38,147 INFO [RS:0;e2fd862922a0:38131 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C38131%2C1732530697764, suffix=, logDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764, archiveDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/oldWALs, maxLogs=32 2024-11-25T10:31:38,148 INFO [RS:0;e2fd862922a0:38131 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38131%2C1732530697764.1732530698147 2024-11-25T10:31:38,153 INFO [RS:0;e2fd862922a0:38131 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530698147 2024-11-25T10:31:38,154 DEBUG [RS:0;e2fd862922a0:38131 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33441:33441),(127.0.0.1/127.0.0.1:43287:43287)] 2024-11-25T10:31:38,343 DEBUG [e2fd862922a0:44353 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T10:31:38,343 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,345 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,38131,1732530697764, state=OPENING 2024-11-25T10:31:38,346 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T10:31:38,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:38,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:31:38,348 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:31:38,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,38131,1732530697764}] 2024-11-25T10:31:38,348 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:38,348 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:38,501 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T10:31:38,503 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44859, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T10:31:38,506 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T10:31:38,506 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:31:38,507 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C38131%2C1732530697764.meta, suffix=.meta, logDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764, archiveDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/oldWALs, maxLogs=32 2024-11-25T10:31:38,508 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38131%2C1732530697764.meta.1732530698508.meta 2024-11-25T10:31:38,513 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.meta.1732530698508.meta 2024-11-25T10:31:38,521 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43287:43287),(127.0.0.1/127.0.0.1:33441:33441)] 2024-11-25T10:31:38,524 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:31:38,524 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T10:31:38,524 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T10:31:38,524 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
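(Illustrative aside, not part of the captured log.) At this point the master has dispatched OpenRegionProcedure for 1588230740 and the region server is opening hbase:meta. A hedged client-side sketch of asking the cluster where hbase:meta ended up, which should agree with the OPEN location recorded further below; it assumes an hbase-site.xml for this mini-cluster is on the classpath, and the class name is invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateMeta {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // hbase:meta has a single region covering the whole key space, so the
      // location of the empty start key is the location of the region itself.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println("hbase:meta is served by " + loc.getServerName());
    }
  }
}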
2024-11-25T10:31:38,524 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T10:31:38,524 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:38,524 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T10:31:38,524 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T10:31:38,528 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:31:38,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:31:38,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:38,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:38,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:31:38,530 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:31:38,531 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:38,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:38,531 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:31:38,532 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:31:38,532 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:38,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:31:38,532 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:31:38,533 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:31:38,533 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:38,533 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
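(Illustrative aside, not part of the captured log.) The store-opener lines above list the hbase:meta column families with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and 8 KB blocks. The sketch below expresses the same attributes for the 'info' family through the public ColumnFamilyDescriptorBuilder API; the attribute values are copied from the log, but this is an illustration of the builder, not how the meta descriptor is produced internally.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamily {
  public static void main(String[] args) {
    // Attribute values mirror the 'info' family printed in the log above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    System.out.println(info);
  }
}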
2024-11-25T10:31:38,533 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:31:38,534 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740 2024-11-25T10:31:38,535 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740 2024-11-25T10:31:38,536 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:31:38,536 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:31:38,536 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:31:38,537 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:31:38,538 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770224, jitterRate=-0.02060963213443756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:31:38,538 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T10:31:38,539 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732530698524Writing region info on filesystem at 1732530698524Initializing all the Stores at 1732530698525 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530698525Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530698528 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530698528Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530698528Cleaning up temporary data from old regions at 1732530698536 (+8 ms)Running coprocessor post-open hooks at 1732530698538 (+2 ms)Region opened successfully at 1732530698539 (+1 ms) 2024-11-25T10:31:38,540 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732530698501 2024-11-25T10:31:38,542 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T10:31:38,542 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T10:31:38,543 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,544 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,38131,1732530697764, state=OPEN 2024-11-25T10:31:38,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:31:38,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:31:38,549 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:38,549 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:31:38,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T10:31:38,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,38131,1732530697764 in 201 msec 2024-11-25T10:31:38,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T10:31:38,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 611 msec 2024-11-25T10:31:38,555 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:31:38,555 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T10:31:38,556 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:31:38,556 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,38131,1732530697764, seqNum=-1] 2024-11-25T10:31:38,556 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:31:38,558 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47761, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:31:38,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 656 msec 2024-11-25T10:31:38,563 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732530698563, completionTime=-1 2024-11-25T10:31:38,564 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T10:31:38,564 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T10:31:38,565 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T10:31:38,565 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732530758565 2024-11-25T10:31:38,565 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732530818565 2024-11-25T10:31:38,565 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T10:31:38,566 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44353,1732530697722-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,566 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44353,1732530697722-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,566 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44353,1732530697722-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,566 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2fd862922a0:44353, period=300000, unit=MILLISECONDS is enabled. 
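
The InitMetaProcedure line above ("Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces") runs as a master-side procedure during startup. For comparison, here is a minimal sketch of creating a namespace through the public client Admin API; the connection boilerplate and the namespace name are illustrative and not taken from this test.

    // Sketch only: creating a namespace via the public Admin API, analogous to the
    // built-in 'default'/'hbase' namespaces that InitMetaProcedure sets up on the master.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // NamespaceDescriptor.create(...) builds the {NAME => '...'} descriptor form seen in the log.
          admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
        }
      }
    }
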
2024-11-25T10:31:38,566 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,566 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T10:31:38,567 DEBUG [master/e2fd862922a0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.777sec 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44353,1732530697722-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:31:38,569 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44353,1732530697722-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T10:31:38,571 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T10:31:38,571 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T10:31:38,572 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44353,1732530697722-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
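
All of the "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines above come from the master registering its periodic chores with its ChoreService. A minimal sketch of that pattern follows, assuming the ScheduledChore constructor and ChoreService.scheduleChore signatures shown here; the chore name and body are hypothetical.

    // Sketch of the ScheduledChore/ChoreService pattern behind the
    // "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines above.
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService choreService = new ChoreService("example");
        ScheduledChore chore =
            new ScheduledChore("ExampleChore", stopper, 60_000, 0, TimeUnit.MILLISECONDS) {
              @Override protected void chore() {
                // periodic work goes here; BalancerChore, CatalogJanitor, etc. follow this shape
              }
            };
        // Scheduling emits the "Chore ScheduledChore ... is enabled." INFO line seen above.
        choreService.scheduleChore(chore);
        // ... later: choreService.shutdown();
      }
    }
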
2024-11-25T10:31:38,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@523d16c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:31:38,577 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2fd862922a0,44353,-1 for getting cluster id 2024-11-25T10:31:38,577 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T10:31:38,579 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f9fde289-8395-4380-9817-c7f24322c925' 2024-11-25T10:31:38,579 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T10:31:38,580 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f9fde289-8395-4380-9817-c7f24322c925" 2024-11-25T10:31:38,580 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e5d3593, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:31:38,580 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2fd862922a0,44353,-1] 2024-11-25T10:31:38,580 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T10:31:38,580 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:31:38,581 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55598, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T10:31:38,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6719db8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:31:38,583 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:31:38,584 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,38131,1732530697764, seqNum=-1] 2024-11-25T10:31:38,584 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:31:38,585 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60706, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:31:38,586 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2fd862922a0,44353,1732530697722 2024-11-25T10:31:38,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:31:38,590 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T10:31:38,590 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T10:31:38,591 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is e2fd862922a0,44353,1732530697722 2024-11-25T10:31:38,591 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@307a19aa 2024-11-25T10:31:38,592 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T10:31:38,592 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55608, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T10:31:38,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T10:31:38,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-25T10:31:38,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:31:38,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:31:38,596 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T10:31:38,596 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:38,596 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-25T10:31:38,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:31:38,597 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T10:31:38,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741835_1011 (size=405) 2024-11-25T10:31:38,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741835_1011 (size=405) 2024-11-25T10:31:38,606 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c78390ebaffbefc1fe74f4f5f79aded7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356 2024-11-25T10:31:38,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741836_1012 (size=88) 2024-11-25T10:31:38,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741836_1012 (size=88) 2024-11-25T10:31:38,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:38,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing c78390ebaffbefc1fe74f4f5f79aded7, disabling compactions & flushes 2024-11-25T10:31:38,612 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:31:38,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:31:38,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. after waiting 0 ms 2024-11-25T10:31:38,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
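
The create-table request logged above (table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family, VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB block size) corresponds to a table descriptor built with the public client API. The sketch below covers only the attributes visible in the log; obtaining the Admin instance is left out as boilerplate. The small hbase.hregion.max.filesize and hbase.hregion.memstore.flush.size values that triggered the TableDescriptorChecker warnings are presumably deliberate test settings used to force frequent flushes and rolls.

    // Sketch: the descriptor carried by the
    // "create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', ...}" request.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateTableSketch {
      static TableDescriptor descriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
                .setBlockCacheEnabled(true)        // BLOCKCACHE => 'true'
                .build())
            .build();
      }

      static void create(Admin admin) throws java.io.IOException {
        // Submitting this is what stores the CreateTableProcedure (pid=4 above) on the master.
        admin.createTable(descriptor());
      }
    }
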
2024-11-25T10:31:38,612 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:31:38,612 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for c78390ebaffbefc1fe74f4f5f79aded7: Waiting for close lock at 1732530698612Disabling compacts and flushes for region at 1732530698612Disabling writes for close at 1732530698612Writing region close event to WAL at 1732530698612Closed at 1732530698612 2024-11-25T10:31:38,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:38,614 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T10:31:38,614 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732530698614"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732530698614"}]},"ts":"1732530698614"} 2024-11-25T10:31:38,617 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-25T10:31:38,618 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T10:31:38,618 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530698618"}]},"ts":"1732530698618"} 2024-11-25T10:31:38,620 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-25T10:31:38,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c78390ebaffbefc1fe74f4f5f79aded7, ASSIGN}] 2024-11-25T10:31:38,622 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c78390ebaffbefc1fe74f4f5f79aded7, ASSIGN 2024-11-25T10:31:38,623 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c78390ebaffbefc1fe74f4f5f79aded7, ASSIGN; state=OFFLINE, location=e2fd862922a0,38131,1732530697764; forceNewPlan=false, retain=false 2024-11-25T10:31:38,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:38,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:38,773 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c78390ebaffbefc1fe74f4f5f79aded7, regionState=OPENING, regionLocation=e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c78390ebaffbefc1fe74f4f5f79aded7, ASSIGN because future has completed 2024-11-25T10:31:38,776 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c78390ebaffbefc1fe74f4f5f79aded7, server=e2fd862922a0,38131,1732530697764}] 2024-11-25T10:31:38,932 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:31:38,932 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => c78390ebaffbefc1fe74f4f5f79aded7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:31:38,933 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,933 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:31:38,933 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,933 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,934 INFO [StoreOpener-c78390ebaffbefc1fe74f4f5f79aded7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,935 INFO [StoreOpener-c78390ebaffbefc1fe74f4f5f79aded7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c78390ebaffbefc1fe74f4f5f79aded7 columnFamilyName info 2024-11-25T10:31:38,935 DEBUG [StoreOpener-c78390ebaffbefc1fe74f4f5f79aded7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:31:38,936 INFO [StoreOpener-c78390ebaffbefc1fe74f4f5f79aded7-1 {}] regionserver.HStore(327): Store=c78390ebaffbefc1fe74f4f5f79aded7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:31:38,936 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,937 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,937 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,937 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,937 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,939 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,941 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:31:38,941 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened c78390ebaffbefc1fe74f4f5f79aded7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844707, jitterRate=0.07410167157649994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T10:31:38,941 DEBUG 
[RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:31:38,942 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for c78390ebaffbefc1fe74f4f5f79aded7: Running coprocessor pre-open hook at 1732530698933Writing region info on filesystem at 1732530698933Initializing all the Stores at 1732530698934 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530698934Cleaning up temporary data from old regions at 1732530698937 (+3 ms)Running coprocessor post-open hooks at 1732530698941 (+4 ms)Region opened successfully at 1732530698942 (+1 ms) 2024-11-25T10:31:38,943 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7., pid=6, masterSystemTime=1732530698929 2024-11-25T10:31:38,945 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:31:38,945 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
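
The recurring "Failed invocation for hdfs://localhost:46735/..." warnings in this section come from RecoverLeaseFSUtils trying to recover the lease on WAL files apparently left behind by an earlier minicluster in the same test run (note the older port 46735 and older timestamps in those paths). It drives HDFS's recoverLease/isFileClosed calls through reflection, which is why each failure surfaces as an InvocationTargetException whose cause is "java.io.IOException: Filesystem closed": the DFSClient behind hdfs://localhost:46735 has already been shut down, so every roughly one-second retry fails the same way. A minimal sketch of the underlying recover-then-poll pattern, using the plain DistributedFileSystem API with a simplified retry policy (not HBase's actual implementation), is:

    // Sketch of the recoverLease/isFileClosed polling that RecoverLeaseFSUtils performs
    // (it invokes these HDFS methods via reflection, which is why the warnings above show
    // InvocationTargetException wrapping "java.io.IOException: Filesystem closed").
    // The retry interval and give-up policy here are simplified assumptions.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      static boolean recover(DistributedFileSystem dfs, Path walFile, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        // Ask the NameNode to start lease recovery; true means the file is already closed.
        if (dfs.recoverLease(walFile)) {
          return true;
        }
        while (System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L);                 // the log above shows roughly one retry per second
          if (dfs.isFileClosed(walFile)) {     // the call that fails here with "Filesystem closed"
            return true;
          }
        }
        return false;
      }
    }
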
2024-11-25T10:31:38,946 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=c78390ebaffbefc1fe74f4f5f79aded7, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,38131,1732530697764 2024-11-25T10:31:38,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure c78390ebaffbefc1fe74f4f5f79aded7, server=e2fd862922a0,38131,1732530697764 because future has completed 2024-11-25T10:31:38,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-25T10:31:38,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure c78390ebaffbefc1fe74f4f5f79aded7, server=e2fd862922a0,38131,1732530697764 in 173 msec 2024-11-25T10:31:38,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-25T10:31:38,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=c78390ebaffbefc1fe74f4f5f79aded7, ASSIGN in 332 msec 2024-11-25T10:31:38,955 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-25T10:31:38,956 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530698955"}]},"ts":"1732530698955"} 2024-11-25T10:31:38,957 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-25T10:31:38,958 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-25T10:31:38,960 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 365 msec 2024-11-25T10:31:39,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:39,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:39,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:39,952 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:31:39,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,968 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:39,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:31:40,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:40,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:40,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:41,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:41,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:41,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:42,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:42,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:42,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:43,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:43,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:43,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-25T10:31:44,004 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-25T10:31:44,004 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-11-25T10:31:44,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-25T10:31:44,327 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-25T10:31:44,327 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-25T10:31:44,327 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-25T10:31:44,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-25T10:31:44,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-25T10:31:44,328 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-25T10:31:44,328 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-25T10:31:44,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:44,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:44,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:45,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:45,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:45,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:46,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:46,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:46,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:47,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:47,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:47,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:48,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T10:31:48,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-25T10:31:48,681 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-25T10:31:48,681 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-25T10:31:48,684 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-25T10:31:48,684 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.
2024-11-25T10:31:48,687 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7., hostname=e2fd862922a0,38131,1732530697764, seqNum=2]
2024-11-25T10:31:48,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-25T10:31:48,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T10:31:48,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-25T10:31:48,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T10:31:48,700 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-25T10:31:48,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-25T10:31:48,701 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-25T10:31:48,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-25T10:31:48,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38131 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-25T10:31:48,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.
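The records above show the master storing FlushTableProcedure pid=7 for TestLogRolling-testCompactionRecordDoesntBlockRolling, stepping it through FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS, and dispatching a FlushRegionProcedure (pid=8) to the region server. As a rough illustration only (this is not the logged test code; the class name and setup are assumptions, and it presumes an hbase-site.xml for the target cluster on the classpath), a client-side flush that would produce this sequence could look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hypothetical client-side trigger for a table flush like the one logged above.
    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Admin.flush() asks the master to run a FlushTableProcedure, which fans out
          // one FlushRegionProcedure per region, matching pid=7 and pid=8 in the records above.
          admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
      }
    }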
2024-11-25T10:31:48,863 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing c78390ebaffbefc1fe74f4f5f79aded7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-25T10:31:48,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/3d0e0f7a7b9149f2a0974c01217051e8 is 1080, key is row0001/info:/1732530708688/Put/seqid=0
2024-11-25T10:31:48,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741837_1013 (size=6033)
2024-11-25T10:31:48,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741837_1013 (size=6033)
2024-11-25T10:31:48,885 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/3d0e0f7a7b9149f2a0974c01217051e8
2024-11-25T10:31:48,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/3d0e0f7a7b9149f2a0974c01217051e8 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/3d0e0f7a7b9149f2a0974c01217051e8
2024-11-25T10:31:48,896 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/3d0e0f7a7b9149f2a0974c01217051e8, entries=1, sequenceid=5, filesize=5.9 K
2024-11-25T10:31:48,897 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c78390ebaffbefc1fe74f4f5f79aded7 in 34ms, sequenceid=5, compaction requested=false
2024-11-25T10:31:48,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for c78390ebaffbefc1fe74f4f5f79aded7:
2024-11-25T10:31:48,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.
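The recurring Close-WAL-Writer-0 WARN entries throughout this section come from lease recovery on old WAL files: RecoverLeaseFSUtils reflectively invokes DistributedFileSystem.isFileClosed roughly once per second, and every attempt fails with java.io.IOException: Filesystem closed because the DFSClient behind that filesystem handle has already been shut down. Below is a minimal sketch of that retry pattern, assuming an open DistributedFileSystem handle and a WAL path; it is an illustration of the pattern, not the actual RecoverLeaseFSUtils implementation, and the class and method names are invented.

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch of the lease-recovery retry loop behind the repeated "Failed invocation" warnings.
    public class LeaseRecoveryRetrySketch {

      // Asks the NameNode to recover the lease on a WAL file, then polls isFileClosed()
      // about once per second until the file is closed or the timeout expires. If the
      // DistributedFileSystem has already been closed, each poll throws
      // IOException("Filesystem closed"), which is what the warnings above report.
      public static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path wal, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L);
          recovered = dfs.isFileClosed(wal);
        }
        return recovered;
      }
    }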
2024-11-25T10:31:48,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-25T10:31:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-25T10:31:48,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-25T10:31:48,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 200 msec
2024-11-25T10:31:48,907 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 210 msec
2024-11-25T10:31:49,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-25T10:31:49,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:49,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:50,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:50,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:50,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:51,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:51,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:51,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:52,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:52,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:52,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:53,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:53,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:53,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:54,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:54,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:54,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:55,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:55,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:55,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:56,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:56,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:56,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:57,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:57,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:57,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:58,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:58,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:58,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-25T10:31:58,782 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T10:31:58,785 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:31:58,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:31:58,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-25T10:31:58,787 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T10:31:58,788 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T10:31:58,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T10:31:58,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38131 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-25T10:31:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
2024-11-25T10:31:58,942 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing c78390ebaffbefc1fe74f4f5f79aded7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T10:31:58,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/ba80fea43a314089bb2fa12093e92c22 is 1080, key is row0002/info:/1732530718783/Put/seqid=0 2024-11-25T10:31:58,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741838_1014 (size=6033) 2024-11-25T10:31:58,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741838_1014 (size=6033) 2024-11-25T10:31:58,952 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/ba80fea43a314089bb2fa12093e92c22 2024-11-25T10:31:58,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/ba80fea43a314089bb2fa12093e92c22 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/ba80fea43a314089bb2fa12093e92c22 2024-11-25T10:31:58,963 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/ba80fea43a314089bb2fa12093e92c22, entries=1, sequenceid=9, filesize=5.9 K 2024-11-25T10:31:58,964 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c78390ebaffbefc1fe74f4f5f79aded7 in 23ms, sequenceid=9, compaction requested=false 2024-11-25T10:31:58,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for c78390ebaffbefc1fe74f4f5f79aded7: 2024-11-25T10:31:58,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
2024-11-25T10:31:58,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-25T10:31:58,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-25T10:31:58,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-25T10:31:58,969 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec 2024-11-25T10:31:58,971 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec 2024-11-25T10:31:59,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:59,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:31:59,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 after 68042ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:31:59,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:31:59,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta after 68035ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-25T10:32:00,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:00,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:00,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:01,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:01,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:01,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:02,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:02,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:02,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:03,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:03,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:03,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:04,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:04,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:04,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:05,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:05,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:05,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:06,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:06,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:06,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:07,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:07,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:07,706 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T10:32:07,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:08,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:08,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:08,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:08,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-25T10:32:08,882 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T10:32:08,885 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38131%2C1732530697764.1732530728884 2024-11-25T10:32:08,892 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:08,893 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:08,893 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:08,893 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:08,893 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:08,893 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530698147 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530728884 2024-11-25T10:32:08,894 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33441:33441),(127.0.0.1/127.0.0.1:43287:43287)] 2024-11-25T10:32:08,894 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530698147 is not closed yet, will try archiving it next time 2024-11-25T10:32:08,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741833_1009 (size=5546) 2024-11-25T10:32:08,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:32:08,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741833_1009 (size=5546) 2024-11-25T10:32:08,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:32:08,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-25T10:32:08,897 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T10:32:08,898 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-25T10:32:08,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T10:32:09,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38131 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-25T10:32:09,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:32:09,052 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing c78390ebaffbefc1fe74f4f5f79aded7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T10:32:09,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/f075cbcd45fb4b59a1f392a0eef234be is 1080, key is row0003/info:/1732530728883/Put/seqid=0 2024-11-25T10:32:09,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741840_1016 (size=6033) 2024-11-25T10:32:09,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741840_1016 (size=6033) 2024-11-25T10:32:09,062 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/f075cbcd45fb4b59a1f392a0eef234be 2024-11-25T10:32:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/f075cbcd45fb4b59a1f392a0eef234be as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f075cbcd45fb4b59a1f392a0eef234be 2024-11-25T10:32:09,072 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f075cbcd45fb4b59a1f392a0eef234be, entries=1, sequenceid=13, filesize=5.9 K 2024-11-25T10:32:09,073 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
c78390ebaffbefc1fe74f4f5f79aded7 in 21ms, sequenceid=13, compaction requested=true 2024-11-25T10:32:09,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for c78390ebaffbefc1fe74f4f5f79aded7: 2024-11-25T10:32:09,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:32:09,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-25T10:32:09,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-25T10:32:09,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-25T10:32:09,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-25T10:32:09,079 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec 2024-11-25T10:32:09,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:09,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:09,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:10,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:10,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:10,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:11,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:11,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:11,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:12,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:12,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:12,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:13,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:13,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:13,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:14,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:14,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:14,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:15,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:15,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:15,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:16,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:16,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:16,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:17,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:17,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:17,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:18,572 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T10:32:18,572 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-25T10:32:18,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:18,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:18,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-25T10:32:18,922 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T10:32:18,922 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:32:18,924 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:32:18,924 DEBUG [Time-limited test {}] regionserver.HStore(1541): c78390ebaffbefc1fe74f4f5f79aded7/info is initiating minor compaction (all files) 2024-11-25T10:32:18,924 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:32:18,924 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:18,924 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of c78390ebaffbefc1fe74f4f5f79aded7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
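Note on the repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above: the frames (Method.invoke -> RecoverLeaseFSUtils.isFileClosed -> DistributedFileSystem.isFileClosed) show the probe going through reflection, so the real error from the already-closed DFSClient surfaces wrapped in an InvocationTargetException. The following is a hypothetical minimal sketch of that reflective probe, not the HBase source; the class name IsFileClosedProbe and its shape are assumptions made for illustration, while FileSystem, Path and isFileClosed(Path) are the real Hadoop APIs named in the traces.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  /**
   * Returns true if the file is confirmed closed, false if the FileSystem
   * implementation does not expose isFileClosed or reports it still open.
   */
  static boolean isFileClosed(FileSystem fs, Path p) throws IOException {
    Method m;
    try {
      // isFileClosed is HDFS-specific, so it is looked up reflectively rather
      // than assumed to exist on every FileSystem implementation.
      m = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // not an HDFS client; nothing to probe
    }
    try {
      return (Boolean) m.invoke(fs, p);
    } catch (IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      // The WARN lines print the wrapper; the interesting part is the cause,
      // e.g. java.io.IOException: Filesystem closed once the DFSClient has
      // already been shut down during mini-cluster teardown.
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause;
      }
      throw new IOException(cause);
    }
  }

  private IsFileClosedProbe() {
  }
}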
2024-11-25T10:32:18,924 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/3d0e0f7a7b9149f2a0974c01217051e8, hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/ba80fea43a314089bb2fa12093e92c22, hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f075cbcd45fb4b59a1f392a0eef234be] into tmpdir=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp, totalSize=17.7 K 2024-11-25T10:32:18,925 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3d0e0f7a7b9149f2a0974c01217051e8, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732530708688 2024-11-25T10:32:18,925 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ba80fea43a314089bb2fa12093e92c22, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732530718783 2024-11-25T10:32:18,925 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f075cbcd45fb4b59a1f392a0eef234be, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732530728883 2024-11-25T10:32:18,936 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): c78390ebaffbefc1fe74f4f5f79aded7#info#compaction#44 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:32:18,936 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/7cd1fd385a634df89394b54588d55061 is 1080, key is row0001/info:/1732530708688/Put/seqid=0 2024-11-25T10:32:18,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741841_1017 (size=8296) 2024-11-25T10:32:18,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741841_1017 (size=8296) 2024-11-25T10:32:18,947 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/7cd1fd385a634df89394b54588d55061 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/7cd1fd385a634df89394b54588d55061 2024-11-25T10:32:18,953 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c78390ebaffbefc1fe74f4f5f79aded7/info of c78390ebaffbefc1fe74f4f5f79aded7 into 7cd1fd385a634df89394b54588d55061(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:32:18,953 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for c78390ebaffbefc1fe74f4f5f79aded7: 2024-11-25T10:32:18,956 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38131%2C1732530697764.1732530738956 2024-11-25T10:32:18,962 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:18,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:18,963 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:18,963 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:18,963 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:18,963 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530728884 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530738956 2024-11-25T10:32:18,964 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43287:43287),(127.0.0.1/127.0.0.1:33441:33441)] 2024-11-25T10:32:18,964 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530728884 is not closed yet, will try archiving it next time 2024-11-25T10:32:18,964 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530698147 to hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/oldWALs/e2fd862922a0%2C38131%2C1732530697764.1732530698147 2024-11-25T10:32:18,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741839_1015 (size=2520) 2024-11-25T10:32:18,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741839_1015 (size=2520) 2024-11-25T10:32:18,965 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:32:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:32:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-25T10:32:18,967 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-25T10:32:18,968 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-25T10:32:18,968 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-25T10:32:19,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38131 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-25T10:32:19,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
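Just above, the region server rolls its WAL (entries=4, filesize=2.45 KB), switches to a new writer, and the WAL-Archive thread moves the oldest file into oldWALs. A WAL roll can also be requested explicitly through the Admin API; the sketch below is hypothetical (class name invented, server name copied from the log entries above) and assumes an HBase client configured against this cluster, not the mechanism the test itself uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server name in host,port,startcode form, taken from the log above.
    ServerName rs = ServerName.valueOf("e2fd862922a0,38131,1732530697764");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server to close its current WAL and open a new one;
      // superseded files are later archived to oldWALs, as in the "Archiving" entry above.
      admin.rollWALWriter(rs);
    }
  }
}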
2024-11-25T10:32:19,122 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing c78390ebaffbefc1fe74f4f5f79aded7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T10:32:19,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/df3077bc36dd4209bc79c42b44bb666e is 1080, key is row0000/info:/1732530738954/Put/seqid=0 2024-11-25T10:32:19,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741843_1019 (size=6033) 2024-11-25T10:32:19,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741843_1019 (size=6033) 2024-11-25T10:32:19,132 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/df3077bc36dd4209bc79c42b44bb666e 2024-11-25T10:32:19,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/df3077bc36dd4209bc79c42b44bb666e as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/df3077bc36dd4209bc79c42b44bb666e 2024-11-25T10:32:19,142 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/df3077bc36dd4209bc79c42b44bb666e, entries=1, sequenceid=18, filesize=5.9 K 2024-11-25T10:32:19,143 INFO [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c78390ebaffbefc1fe74f4f5f79aded7 in 21ms, sequenceid=18, compaction requested=false 2024-11-25T10:32:19,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for c78390ebaffbefc1fe74f4f5f79aded7: 2024-11-25T10:32:19,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
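The recurring WARN traces in the entries that follow all come from the same place: RecoverLeaseFSUtils reflectively invokes DistributedFileSystem.isFileClosed while recovering the lease on old WAL files, but the DFSClient backing that FileSystem has already been shut down, so each attempt unwraps to java.io.IOException: Filesystem closed. A minimal, self-contained sketch of that failing pattern is below, assuming a reachable HDFS NameNode at the hypothetical address used here (the path and class name are also illustrative).

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address and file; substitute a real cluster to run this.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    Path wal = new Path("/example/wal-file");
    fs.close(); // after this, the underlying DFSClient rejects further calls
    try {
      // Same check RecoverLeaseFSUtils performs (via reflection) during lease recovery.
      ((DistributedFileSystem) fs).isFileClosed(wal);
    } catch (IOException e) {
      System.out.println(e.getMessage()); // prints "Filesystem closed"
    }
  }
}

Because isFileClosed never gets an answer, the lease-recovery loop keeps retrying at roughly one-second intervals, which is why the same trace repeats for each of the three WAL files in the entries below.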
2024-11-25T10:32:19,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-25T10:32:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-25T10:32:19,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-25T10:32:19,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-25T10:32:19,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-25T10:32:19,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:19,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:19,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:20,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:20,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:20,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:21,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:21,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:21,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:22,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:22,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:22,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:23,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:23,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:23,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:23,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c78390ebaffbefc1fe74f4f5f79aded7, had cached 0 bytes from a total of 14329 2024-11-25T10:32:24,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:24,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:24,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
[... the same util.RecoverLeaseFSUtils(258) WARN ("Failed invocation for <WAL file> java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed"), with an identical stack trace, recurs at roughly one-second intervals from 2024-11-25T10:32:25,638 through 2024-11-25T10:32:28,714, cycling over the three WAL files hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468, hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta and hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 ...]
2024-11-25T10:32:29,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44353 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-25T10:32:29,002 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-25T10:32:29,005 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C38131%2C1732530697764.1732530749005 2024-11-25T10:32:29,011 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,011 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,011 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,011 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,011 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,012 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530738956 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530749005 2024-11-25T10:32:29,012 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33441:33441),(127.0.0.1/127.0.0.1:43287:43287)] 2024-11-25T10:32:29,012 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530738956 is not closed yet, will
try archiving it next time 2024-11-25T10:32:29,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T10:32:29,012 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/WALs/e2fd862922a0,38131,1732530697764/e2fd862922a0%2C38131%2C1732530697764.1732530728884 to hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/oldWALs/e2fd862922a0%2C38131%2C1732530697764.1732530728884 2024-11-25T10:32:29,012 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T10:32:29,013 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:32:29,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:32:29,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:32:29,013 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T10:32:29,013 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T10:32:29,013 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=704393803, stopped=false 2024-11-25T10:32:29,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741842_1018 (size=2026) 2024-11-25T10:32:29,013 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2fd862922a0,44353,1732530697722 2024-11-25T10:32:29,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:32:29,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:32:29,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:29,015 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:32:29,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:29,015 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
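The long run of "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" WARNs earlier in this section is the signature of a reflective call whose target throws: Method.invoke wraps whatever the invoked method throws in an InvocationTargetException, so the useful information is in the "Caused by" frame, here an HDFS client that has already been shut down while the Close-WAL-Writer thread keeps probing whether the old WAL file is closed. The sketch below shows that probe pattern in minimal form; the class name and error handling are illustrative assumptions, not the actual RecoverLeaseFSUtils code, and only standard java.lang.reflect and Hadoop FileSystem APIs are used.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, for illustration only.
final class IsFileClosedProbe {
  /** Best-effort probe: returns true only if the filesystem reports the file as closed. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); look it up reflectively so the
      // caller needs no compile-time dependency on the HDFS-specific subclass.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // probe not available on this FileSystem implementation
    } catch (InvocationTargetException e) {
      // The reflected method threw; with an already closed DFSClient the cause is
      // java.io.IOException("Filesystem closed"), which is what the WARNs above record.
      return false;
    }
  }
}

A caller that retries this probe once per second until it returns true, logging each InvocationTargetException, would produce exactly the repeating WARN pattern seen above.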
2024-11-25T10:32:29,015 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:32:29,015 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:32:29,015 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,38131,1732530697764' ***** 2024-11-25T10:32:29,015 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T10:32:29,015 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T10:32:29,016 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(3091): Received CLOSE for c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:32:29,016 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,38131,1732530697764 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2fd862922a0:38131. 2024-11-25T10:32:29,016 DEBUG [RS:0;e2fd862922a0:38131 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:32:29,016 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c78390ebaffbefc1fe74f4f5f79aded7, disabling compactions & flushes 2024-11-25T10:32:29,016 DEBUG [RS:0;e2fd862922a0:38131 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:32:29,016 INFO 
[RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:32:29,016 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-25T10:32:29,016 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T10:32:29,017 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. after waiting 0 ms 2024-11-25T10:32:29,017 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-25T10:32:29,017 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:32:29,017 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, c78390ebaffbefc1fe74f4f5f79aded7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.} 2024-11-25T10:32:29,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741842_1018 (size=2026) 2024-11-25T10:32:29,017 DEBUG [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, c78390ebaffbefc1fe74f4f5f79aded7 2024-11-25T10:32:29,017 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing c78390ebaffbefc1fe74f4f5f79aded7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-25T10:32:29,017 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:32:29,017 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:32:29,017 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:32:29,017 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:32:29,017 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:32:29,017 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-25T10:32:29,021 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/f4683b93a9ca47bba138a86a29bf7f82 is 1080, key is row0001/info:/1732530749003/Put/seqid=0 2024-11-25T10:32:29,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741845_1021 (size=6033) 2024-11-25T10:32:29,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741845_1021 (size=6033) 2024-11-25T10:32:29,026 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/f4683b93a9ca47bba138a86a29bf7f82 2024-11-25T10:32:29,032 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/.tmp/info/f4683b93a9ca47bba138a86a29bf7f82 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f4683b93a9ca47bba138a86a29bf7f82 2024-11-25T10:32:29,034 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/info/9c90dc4603ee45a9b506c9a67ff855d9 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7./info:regioninfo/1732530698946/Put/seqid=0 2024-11-25T10:32:29,040 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f4683b93a9ca47bba138a86a29bf7f82, entries=1, sequenceid=22, filesize=5.9 K 2024-11-25T10:32:29,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741846_1022 (size=7308) 2024-11-25T10:32:29,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741846_1022 (size=7308) 2024-11-25T10:32:29,042 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c78390ebaffbefc1fe74f4f5f79aded7 in 25ms, sequenceid=22, compaction requested=true 
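The flush entries above show the output file being committed in two steps: the new HFile is written under the region's .tmp directory and then moved into the column-family directory (info/), after which it is registered with the store along with its entry count, sequence id, and size. The following is a minimal sketch of that tmp-then-rename commit against the public Hadoop FileSystem API; the helper class is hypothetical and omits the validation and error handling that HRegionFileSystem (the class logging "Committing ... as ..." above) actually performs.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, for illustration only.
final class CommitFlushedFile {
  /** Moves a flushed HFile from the region's .tmp area into its column-family directory. */
  static Path commit(Configuration conf, Path tmpFile, Path familyDir) throws IOException {
    FileSystem fs = familyDir.getFileSystem(conf);
    Path dst = new Path(familyDir, tmpFile.getName()); // keep the generated file name
    if (!fs.rename(tmpFile, dst)) {                    // a same-filesystem rename, atomic on HDFS
      throw new IOException("Failed to commit " + tmpFile + " to " + dst);
    }
    return dst;
  }
}

Writing into .tmp first means readers of the family directory only ever see complete HFiles; the rename makes the flushed data visible in one step.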
2024-11-25T10:32:29,042 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/info/9c90dc4603ee45a9b506c9a67ff855d9 2024-11-25T10:32:29,044 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/3d0e0f7a7b9149f2a0974c01217051e8, hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/ba80fea43a314089bb2fa12093e92c22, hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f075cbcd45fb4b59a1f392a0eef234be] to archive 2024-11-25T10:32:29,045 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T10:32:29,047 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/3d0e0f7a7b9149f2a0974c01217051e8 to hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/3d0e0f7a7b9149f2a0974c01217051e8 2024-11-25T10:32:29,048 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/ba80fea43a314089bb2fa12093e92c22 to hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/ba80fea43a314089bb2fa12093e92c22 2024-11-25T10:32:29,049 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f075cbcd45fb4b59a1f392a0eef234be to hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/info/f075cbcd45fb4b59a1f392a0eef234be 2024-11-25T10:32:29,050 DEBUG 
[StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e2fd862922a0:44353 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-25T10:32:29,050 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [3d0e0f7a7b9149f2a0974c01217051e8=6033, ba80fea43a314089bb2fa12093e92c22=6033, f075cbcd45fb4b59a1f392a0eef234be=6033] 2024-11-25T10:32:29,056 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/c78390ebaffbefc1fe74f4f5f79aded7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-25T10:32:29,056 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 2024-11-25T10:32:29,056 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c78390ebaffbefc1fe74f4f5f79aded7: Waiting for close lock at 1732530749016Running coprocessor pre-close hooks at 1732530749016Disabling compacts and flushes for region at 1732530749016Disabling writes for close at 1732530749017 (+1 ms)Obtaining lock to block concurrent updates at 1732530749017Preparing flush snapshotting stores in c78390ebaffbefc1fe74f4f5f79aded7 at 1732530749017Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732530749017Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. at 1732530749018 (+1 ms)Flushing c78390ebaffbefc1fe74f4f5f79aded7/info: creating writer at 1732530749018Flushing c78390ebaffbefc1fe74f4f5f79aded7/info: appending metadata at 1732530749020 (+2 ms)Flushing c78390ebaffbefc1fe74f4f5f79aded7/info: closing flushed file at 1732530749021 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f3e6076: reopening flushed file at 1732530749031 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for c78390ebaffbefc1fe74f4f5f79aded7 in 25ms, sequenceid=22, compaction requested=true at 1732530749042 (+11 ms)Writing region close event to WAL at 1732530749052 (+10 ms)Running coprocessor post-close hooks at 1732530749056 (+4 ms)Closed at 1732530749056 2024-11-25T10:32:29,057 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732530698593.c78390ebaffbefc1fe74f4f5f79aded7. 
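The HFileArchiver lines above move the compacted store files out of the live region directory into a parallel location under archive/, preserving the data/<namespace>/<table>/<region>/<family>/<file> layout that is visible in the source and destination URIs. The sketch below reproduces just that path mapping plus the move; it is an illustrative assumption, not the backup.HFileArchiver implementation referenced in the log, which does considerably more.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, for illustration only; assumes storeFile lives under <rootDir>/data.
final class ArchiveCompactedFile {
  static Path archive(Configuration conf, Path rootDir, Path storeFile) throws IOException {
    FileSystem fs = storeFile.getFileSystem(conf);
    String dataPrefix = new Path(rootDir, "data").toUri().getPath();
    String filePath = storeFile.toUri().getPath();
    // Rebuild the path relative to <rootDir>/data and graft it onto <rootDir>/archive/data.
    String relative = filePath.substring(dataPrefix.length() + 1);
    Path archived = new Path(new Path(rootDir, "archive/data"), relative);
    fs.mkdirs(archived.getParent()); // ensure archive/<ns>/<table>/<region>/<family> exists
    if (!fs.rename(storeFile, archived)) {
      throw new IOException("Failed to archive " + storeFile + " to " + archived);
    }
    return archived;
  }
}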
2024-11-25T10:32:29,065 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/ns/5ada5788987f4ee7a896dad7186806f3 is 43, key is default/ns:d/1732530698558/Put/seqid=0 2024-11-25T10:32:29,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741847_1023 (size=5153) 2024-11-25T10:32:29,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741847_1023 (size=5153) 2024-11-25T10:32:29,070 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/ns/5ada5788987f4ee7a896dad7186806f3 2024-11-25T10:32:29,088 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/table/eb039d806a494bf9a07c32cb95f9d7ba is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732530698955/Put/seqid=0 2024-11-25T10:32:29,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741848_1024 (size=5508) 2024-11-25T10:32:29,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741848_1024 (size=5508) 2024-11-25T10:32:29,094 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/table/eb039d806a494bf9a07c32cb95f9d7ba 2024-11-25T10:32:29,099 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/info/9c90dc4603ee45a9b506c9a67ff855d9 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/info/9c90dc4603ee45a9b506c9a67ff855d9 2024-11-25T10:32:29,103 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/info/9c90dc4603ee45a9b506c9a67ff855d9, entries=10, sequenceid=11, filesize=7.1 K 2024-11-25T10:32:29,104 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/ns/5ada5788987f4ee7a896dad7186806f3 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/ns/5ada5788987f4ee7a896dad7186806f3 2024-11-25T10:32:29,109 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/ns/5ada5788987f4ee7a896dad7186806f3, entries=2, sequenceid=11, filesize=5.0 K 2024-11-25T10:32:29,110 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/.tmp/table/eb039d806a494bf9a07c32cb95f9d7ba as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/table/eb039d806a494bf9a07c32cb95f9d7ba 2024-11-25T10:32:29,114 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/table/eb039d806a494bf9a07c32cb95f9d7ba, entries=2, sequenceid=11, filesize=5.4 K 2024-11-25T10:32:29,115 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-11-25T10:32:29,119 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-25T10:32:29,119 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:32:29,120 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:32:29,120 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530749017Running coprocessor pre-close hooks at 1732530749017Disabling compacts and flushes for region at 1732530749017Disabling writes for close at 1732530749017Obtaining lock to block concurrent updates at 1732530749017Preparing flush snapshotting stores in 1588230740 at 1732530749017Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732530749017Flushing stores of hbase:meta,,1.1588230740 at 1732530749018 (+1 ms)Flushing 1588230740/info: creating writer at 1732530749018Flushing 1588230740/info: appending metadata at 1732530749034 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732530749034Flushing 1588230740/ns: creating writer at 1732530749047 (+13 ms)Flushing 1588230740/ns: appending metadata at 1732530749064 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732530749065 (+1 ms)Flushing 1588230740/table: creating writer at 1732530749074 (+9 ms)Flushing 1588230740/table: appending metadata at 1732530749088 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732530749088Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c650548: reopening flushed file at 1732530749098 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ece1bd4: reopening flushed file at 1732530749104 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60358074: reopening flushed file at 1732530749109 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1732530749115 (+6 ms)Writing region close event to WAL at 1732530749116 (+1 ms)Running coprocessor post-close hooks at 1732530749119 (+3 ms)Closed at 1732530749120 (+1 ms) 2024-11-25T10:32:29,120 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T10:32:29,217 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,38131,1732530697764; all regions closed. 2024-11-25T10:32:29,217 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,218 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,218 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,218 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,218 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741834_1010 (size=3306) 2024-11-25T10:32:29,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741834_1010 (size=3306) 2024-11-25T10:32:29,222 DEBUG [RS:0;e2fd862922a0:38131 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/oldWALs 2024-11-25T10:32:29,222 INFO [RS:0;e2fd862922a0:38131 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C38131%2C1732530697764.meta:.meta(num 1732530698508) 2024-11-25T10:32:29,223 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,223 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,223 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,223 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,223 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741844_1020 (size=1252) 2024-11-25T10:32:29,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741844_1020 (size=1252) 2024-11-25T10:32:29,227 DEBUG [RS:0;e2fd862922a0:38131 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/oldWALs 2024-11-25T10:32:29,227 INFO [RS:0;e2fd862922a0:38131 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C38131%2C1732530697764:(num 1732530749005) 2024-11-25T10:32:29,228 DEBUG [RS:0;e2fd862922a0:38131 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:32:29,228 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:32:29,228 INFO [RS:0;e2fd862922a0:38131 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:32:29,228 INFO [RS:0;e2fd862922a0:38131 {}] hbase.ChoreService(370): Chore service for: regionserver/e2fd862922a0:0 
had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-25T10:32:29,228 INFO [RS:0;e2fd862922a0:38131 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:32:29,228 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:32:29,228 INFO [RS:0;e2fd862922a0:38131 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38131 2024-11-25T10:32:29,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,38131,1732530697764 2024-11-25T10:32:29,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:32:29,230 INFO [RS:0;e2fd862922a0:38131 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:32:29,231 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,38131,1732530697764] 2024-11-25T10:32:29,233 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,38131,1732530697764 already deleted, retry=false 2024-11-25T10:32:29,233 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,38131,1732530697764 expired; onlineServers=0 2024-11-25T10:32:29,233 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2fd862922a0,44353,1732530697722' ***** 2024-11-25T10:32:29,234 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T10:32:29,234 INFO [M:0;e2fd862922a0:44353 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:32:29,234 INFO [M:0;e2fd862922a0:44353 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:32:29,234 DEBUG [M:0;e2fd862922a0:44353 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T10:32:29,234 DEBUG [M:0;e2fd862922a0:44353 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T10:32:29,234 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T10:32:29,234 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530697912 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530697912,5,FailOnTimeoutGroup] 2024-11-25T10:32:29,234 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530697911 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530697911,5,FailOnTimeoutGroup] 2024-11-25T10:32:29,234 INFO [M:0;e2fd862922a0:44353 {}] hbase.ChoreService(370): Chore service for: master/e2fd862922a0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T10:32:29,234 INFO [M:0;e2fd862922a0:44353 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:32:29,234 DEBUG [M:0;e2fd862922a0:44353 {}] master.HMaster(1795): Stopping service threads 2024-11-25T10:32:29,234 INFO [M:0;e2fd862922a0:44353 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T10:32:29,234 INFO [M:0;e2fd862922a0:44353 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:32:29,234 INFO [M:0;e2fd862922a0:44353 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T10:32:29,234 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T10:32:29,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T10:32:29,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:29,236 DEBUG [M:0;e2fd862922a0:44353 {}] zookeeper.ZKUtil(347): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T10:32:29,236 WARN [M:0;e2fd862922a0:44353 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T10:32:29,236 INFO [M:0;e2fd862922a0:44353 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/.lastflushedseqids 2024-11-25T10:32:29,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741849_1025 (size=130) 2024-11-25T10:32:29,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741849_1025 (size=130) 2024-11-25T10:32:29,241 INFO [M:0;e2fd862922a0:44353 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T10:32:29,241 INFO [M:0;e2fd862922a0:44353 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T10:32:29,242 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:32:29,242 INFO [M:0;e2fd862922a0:44353 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:32:29,242 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:32:29,242 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:32:29,242 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:32:29,242 INFO [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.62 KB heapSize=55.02 KB 2024-11-25T10:32:29,265 DEBUG [M:0;e2fd862922a0:44353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3e963d4814a546cb9533719ccdac4a5a is 82, key is hbase:meta,,1/info:regioninfo/1732530698543/Put/seqid=0 2024-11-25T10:32:29,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741850_1026 (size=5672) 2024-11-25T10:32:29,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741850_1026 (size=5672) 2024-11-25T10:32:29,270 INFO [M:0;e2fd862922a0:44353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3e963d4814a546cb9533719ccdac4a5a 2024-11-25T10:32:29,289 DEBUG [M:0;e2fd862922a0:44353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/62f53e9897d24b238a9956698f47f7e3 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732530698960/Put/seqid=0 2024-11-25T10:32:29,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741851_1027 (size=7826) 2024-11-25T10:32:29,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741851_1027 (size=7826) 2024-11-25T10:32:29,294 INFO [M:0;e2fd862922a0:44353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.02 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/62f53e9897d24b238a9956698f47f7e3 2024-11-25T10:32:29,299 INFO [M:0;e2fd862922a0:44353 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 62f53e9897d24b238a9956698f47f7e3 2024-11-25T10:32:29,313 DEBUG [M:0;e2fd862922a0:44353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96b25576d39540a7bf662eda18e2d519 is 69, key is e2fd862922a0,38131,1732530697764/rs:state/1732530697997/Put/seqid=0 
2024-11-25T10:32:29,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741852_1028 (size=5156) 2024-11-25T10:32:29,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741852_1028 (size=5156) 2024-11-25T10:32:29,318 INFO [M:0;e2fd862922a0:44353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96b25576d39540a7bf662eda18e2d519 2024-11-25T10:32:29,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:32:29,332 INFO [RS:0;e2fd862922a0:38131 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:32:29,332 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38131-0x1014eae607c0001, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:32:29,332 INFO [RS:0;e2fd862922a0:38131 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,38131,1732530697764; zookeeper connection closed. 2024-11-25T10:32:29,332 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4d0cae33 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4d0cae33 2024-11-25T10:32:29,332 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T10:32:29,335 DEBUG [M:0;e2fd862922a0:44353 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d91aeebdef994deea3761db198887974 is 52, key is load_balancer_on/state:d/1732530698589/Put/seqid=0 2024-11-25T10:32:29,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741853_1029 (size=5056) 2024-11-25T10:32:29,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741853_1029 (size=5056) 2024-11-25T10:32:29,340 INFO [M:0;e2fd862922a0:44353 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d91aeebdef994deea3761db198887974 2024-11-25T10:32:29,344 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3e963d4814a546cb9533719ccdac4a5a as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3e963d4814a546cb9533719ccdac4a5a 2024-11-25T10:32:29,349 INFO [M:0;e2fd862922a0:44353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3e963d4814a546cb9533719ccdac4a5a, entries=8, sequenceid=121, filesize=5.5 K 2024-11-25T10:32:29,349 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/62f53e9897d24b238a9956698f47f7e3 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/62f53e9897d24b238a9956698f47f7e3 2024-11-25T10:32:29,353 INFO [M:0;e2fd862922a0:44353 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 62f53e9897d24b238a9956698f47f7e3 2024-11-25T10:32:29,353 INFO [M:0;e2fd862922a0:44353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/62f53e9897d24b238a9956698f47f7e3, entries=14, sequenceid=121, filesize=7.6 K 2024-11-25T10:32:29,354 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96b25576d39540a7bf662eda18e2d519 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/96b25576d39540a7bf662eda18e2d519 2024-11-25T10:32:29,358 INFO [M:0;e2fd862922a0:44353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/96b25576d39540a7bf662eda18e2d519, entries=1, sequenceid=121, filesize=5.0 K 2024-11-25T10:32:29,359 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d91aeebdef994deea3761db198887974 as hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d91aeebdef994deea3761db198887974 2024-11-25T10:32:29,363 INFO [M:0;e2fd862922a0:44353 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41211/user/jenkins/test-data/914812cc-89ec-c333-0191-9a91b7ae2356/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d91aeebdef994deea3761db198887974, entries=1, sequenceid=121, filesize=4.9 K 2024-11-25T10:32:29,364 INFO [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.62 KB/44668, heapSize ~54.96 KB/56280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=121, compaction requested=false 2024-11-25T10:32:29,365 INFO [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T10:32:29,365 DEBUG [M:0;e2fd862922a0:44353 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530749242Disabling compacts and flushes for region at 1732530749242Disabling writes for close at 1732530749242Obtaining lock to block concurrent updates at 1732530749242Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732530749242Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44668, getHeapSize=56280, getOffHeapSize=0, getCellsCount=140 at 1732530749242Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732530749243 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732530749243Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732530749264 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732530749264Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732530749275 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732530749288 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732530749289 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732530749299 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732530749313 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732530749313Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732530749322 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732530749335 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732530749335Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37d8443a: reopening flushed file at 1732530749344 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f40f279: reopening flushed file at 1732530749349 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63f5b31a: reopening flushed file at 1732530749353 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e849dc9: reopening flushed file at 1732530749358 (+5 ms)Finished flush of dataSize ~43.62 KB/44668, heapSize ~54.96 KB/56280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=121, compaction requested=false at 1732530749364 (+6 ms)Writing region close event to WAL at 1732530749365 (+1 ms)Closed at 1732530749365 2024-11-25T10:32:29,366 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,366 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,366 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,366 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,366 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:32:29,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35331 is added to blk_1073741830_1006 (size=53065) 2024-11-25T10:32:29,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37645 is added to blk_1073741830_1006 (size=53065) 2024-11-25T10:32:29,368 INFO [M:0;e2fd862922a0:44353 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-25T10:32:29,368 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:32:29,368 INFO [M:0;e2fd862922a0:44353 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44353 2024-11-25T10:32:29,368 INFO [M:0;e2fd862922a0:44353 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:32:29,470 INFO [M:0;e2fd862922a0:44353 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:32:29,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:32:29,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44353-0x1014eae607c0000, quorum=127.0.0.1:65080, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:32:29,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1b5fa12b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:32:29,472 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ecaedd6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:32:29,473 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:32:29,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@231eb320{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:32:29,473 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16fb9c03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir/,STOPPED} 2024-11-25T10:32:29,474 WARN [BP-1489323801-172.17.0.2-1732530696970 heartbeating to localhost/127.0.0.1:41211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:32:29,474 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:32:29,474 WARN [BP-1489323801-172.17.0.2-1732530696970 heartbeating to localhost/127.0.0.1:41211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1489323801-172.17.0.2-1732530696970 (Datanode Uuid 9100fa0a-b470-4a41-9fb3-5145d4fd8b82) service to localhost/127.0.0.1:41211 2024-11-25T10:32:29,474 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:32:29,475 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data3/current/BP-1489323801-172.17.0.2-1732530696970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:32:29,475 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data4/current/BP-1489323801-172.17.0.2-1732530696970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:32:29,475 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:32:29,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e7fac47{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:32:29,477 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a3c93e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:32:29,477 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:32:29,478 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1850586f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:32:29,478 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7934466e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir/,STOPPED} 2024-11-25T10:32:29,479 WARN [BP-1489323801-172.17.0.2-1732530696970 heartbeating to localhost/127.0.0.1:41211 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:32:29,479 WARN [BP-1489323801-172.17.0.2-1732530696970 heartbeating to localhost/127.0.0.1:41211 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1489323801-172.17.0.2-1732530696970 (Datanode Uuid 986b6cbd-05f0-4330-8bd0-63df6f447ead) service to localhost/127.0.0.1:41211 2024-11-25T10:32:29,479 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:32:29,479 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:32:29,479 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data1/current/BP-1489323801-172.17.0.2-1732530696970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:32:29,480 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/cluster_c292f939-2c1b-58e6-c260-a895c4c67133/data/data2/current/BP-1489323801-172.17.0.2-1732530696970 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:32:29,480 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:32:29,486 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6955024e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:32:29,486 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b7f365e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:32:29,486 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:32:29,486 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@637fc016{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:32:29,486 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bd654dd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir/,STOPPED} 2024-11-25T10:32:29,492 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T10:32:29,509 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T10:32:29,517 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=207 (was 180) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41211 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41211 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41211 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:41211 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41211 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41211 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e2fd862922a0:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41211 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41211 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41211 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=47 (was 71), ProcessCount=11 (was 11), AvailableMemoryMB=1535 (was 1579) 2024-11-25T10:32:29,524 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=207, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=47, ProcessCount=11, AvailableMemoryMB=1535 2024-11-25T10:32:29,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T10:32:29,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.log.dir so I do NOT create it in target/test-data/253470b1-d071-61be-80c0-b03a2d60625f 2024-11-25T10:32:29,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/5a0bd4c6-4316-7c29-dfba-f5291a0b4492/hadoop.tmp.dir so I do NOT create it in target/test-data/253470b1-d071-61be-80c0-b03a2d60625f 2024-11-25T10:32:29,524 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966, deleteOnExit=true 2024-11-25T10:32:29,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/test.cache.data in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read 
short circuit is OFF 2024-11-25T10:32:29,525 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T10:32:29,525 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/nfs.dump.dir in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/java.io.tmpdir in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T10:32:29,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T10:32:29,539 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:32:29,593 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:32:29,597 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:32:29,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:32:29,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:32:29,598 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:32:29,599 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:32:29,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18de480b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:32:29,599 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7142f9c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:32:29,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:29,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@647c0ca7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/java.io.tmpdir/jetty-localhost-39365-hadoop-hdfs-3_4_1-tests_jar-_-any-8185077078838741370/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:32:29,714 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@458b9f0d{HTTP/1.1, (http/1.1)}{localhost:39365} 2024-11-25T10:32:29,714 INFO [Time-limited test {}] server.Server(415): Started @241226ms 2024-11-25T10:32:29,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:29,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:29,727 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:32:29,780 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:32:29,782 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:32:29,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:32:29,783 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:32:29,783 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:32:29,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4300eaa8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:32:29,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4181d37d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:32:29,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@516e643a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/java.io.tmpdir/jetty-localhost-37383-hadoop-hdfs-3_4_1-tests_jar-_-any-14866225081431954245/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:32:29,897 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27e7702e{HTTP/1.1, (http/1.1)}{localhost:37383} 2024-11-25T10:32:29,897 INFO [Time-limited test {}] server.Server(415): Started @241410ms 2024-11-25T10:32:29,898 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:32:29,947 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:32:29,949 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:32:29,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:32:29,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:32:29,950 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:32:29,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43fb00bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:32:29,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@551592b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:32:30,016 INFO [regionserver/e2fd862922a0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:32:30,017 WARN [Thread-1951 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data1/current/BP-1457786347-172.17.0.2-1732530749544/current, will proceed with Du for space computation calculation, 2024-11-25T10:32:30,017 WARN [Thread-1952 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data2/current/BP-1457786347-172.17.0.2-1732530749544/current, will proceed with Du for space computation calculation, 2024-11-25T10:32:30,039 WARN [Thread-1930 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:32:30,041 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x605ec5adafed69a5 with lease ID 0x23e0e7c28b618491: Processing first storage report for DS-7401e8c5-afb6-4399-bb2d-0e0d643bba76 from datanode DatanodeRegistration(127.0.0.1:44101, datanodeUuid=5ccaba14-c692-4edf-86ca-2ab234956338, infoPort=40965, infoSecurePort=0, ipcPort=44389, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544) 2024-11-25T10:32:30,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x605ec5adafed69a5 with lease ID 0x23e0e7c28b618491: from storage DS-7401e8c5-afb6-4399-bb2d-0e0d643bba76 node DatanodeRegistration(127.0.0.1:44101, datanodeUuid=5ccaba14-c692-4edf-86ca-2ab234956338, infoPort=40965, infoSecurePort=0, ipcPort=44389, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:32:30,041 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x605ec5adafed69a5 with lease ID 0x23e0e7c28b618491: Processing first storage report for DS-61a65445-ba71-426f-ac13-b5bfa650a09b from datanode DatanodeRegistration(127.0.0.1:44101, datanodeUuid=5ccaba14-c692-4edf-86ca-2ab234956338, infoPort=40965, infoSecurePort=0, ipcPort=44389, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544) 2024-11-25T10:32:30,041 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x605ec5adafed69a5 with lease ID 0x23e0e7c28b618491: from storage DS-61a65445-ba71-426f-ac13-b5bfa650a09b node DatanodeRegistration(127.0.0.1:44101, datanodeUuid=5ccaba14-c692-4edf-86ca-2ab234956338, infoPort=40965, infoSecurePort=0, ipcPort=44389, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:32:30,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3868302b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/java.io.tmpdir/jetty-localhost-38811-hadoop-hdfs-3_4_1-tests_jar-_-any-7544217347203174863/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:32:30,071 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76385d85{HTTP/1.1, (http/1.1)}{localhost:38811} 2024-11-25T10:32:30,071 INFO [Time-limited test {}] server.Server(415): Started @241584ms 2024-11-25T10:32:30,072 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
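The DataNode and block-report records above are the DFS side of the mini cluster that HBaseTestingUtil(805) reported starting earlier with numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1. Purely as an illustration of that option set, a test could request the same topology roughly as in the sketch below; this assumes the branch-3 HBaseTestingUtil/StartMiniClusterOption API named in the log, and the class and variable names are invented, not taken from TestLogRolling.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: mirrors the StartMiniClusterOption printed by HBaseTestingUtil(805).
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // numMasters=1 in the log
        .numRegionServers(1)  // numRegionServers=1
        .numDataNodes(2)      // numDataNodes=2 (the two DataNodes reporting storage above)
        .numZkServers(1)      // numZkServers=1
        .build();
    util.startMiniCluster(option); // brings up mini DFS, mini ZooKeeper, then HBase
    try {
      // test body would go here
    } finally {
      util.shutdownMiniCluster(); // stops HBase, ZooKeeper and DFS and cleans the test dirs
    }
  }
}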
2024-11-25T10:32:30,172 WARN [Thread-1977 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data3/current/BP-1457786347-172.17.0.2-1732530749544/current, will proceed with Du for space computation calculation, 2024-11-25T10:32:30,172 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data4/current/BP-1457786347-172.17.0.2-1732530749544/current, will proceed with Du for space computation calculation, 2024-11-25T10:32:30,188 WARN [Thread-1966 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:32:30,190 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb5d6b64f0b92ce2 with lease ID 0x23e0e7c28b618492: Processing first storage report for DS-2d87efbb-2c75-4793-aadc-5cbbd265e427 from datanode DatanodeRegistration(127.0.0.1:32869, datanodeUuid=2dc40fde-6e02-4684-994c-c63536d17900, infoPort=32779, infoSecurePort=0, ipcPort=38285, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544) 2024-11-25T10:32:30,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb5d6b64f0b92ce2 with lease ID 0x23e0e7c28b618492: from storage DS-2d87efbb-2c75-4793-aadc-5cbbd265e427 node DatanodeRegistration(127.0.0.1:32869, datanodeUuid=2dc40fde-6e02-4684-994c-c63536d17900, infoPort=32779, infoSecurePort=0, ipcPort=38285, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:32:30,190 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb5d6b64f0b92ce2 with lease ID 0x23e0e7c28b618492: Processing first storage report for DS-7bc9647c-698e-4513-bb20-1ab90bee4719 from datanode DatanodeRegistration(127.0.0.1:32869, datanodeUuid=2dc40fde-6e02-4684-994c-c63536d17900, infoPort=32779, infoSecurePort=0, ipcPort=38285, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544) 2024-11-25T10:32:30,190 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb5d6b64f0b92ce2 with lease ID 0x23e0e7c28b618492: from storage DS-7bc9647c-698e-4513-bb20-1ab90bee4719 node DatanodeRegistration(127.0.0.1:32869, datanodeUuid=2dc40fde-6e02-4684-994c-c63536d17900, infoPort=32779, infoSecurePort=0, ipcPort=38285, storageInfo=lv=-57;cid=testClusterID;nsid=580870846;c=1732530749544), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:32:30,192 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f 2024-11-25T10:32:30,195 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/zookeeper_0, clientPort=58511, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T10:32:30,196 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58511 2024-11-25T10:32:30,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:30,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:30,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:32:30,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:32:30,206 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d with version=8 2024-11-25T10:32:30,206 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase-staging 2024-11-25T10:32:30,208 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:32:30,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:32:30,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:32:30,208 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:32:30,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:32:30,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:32:30,208 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T10:32:30,208 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:32:30,209 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34293 2024-11-25T10:32:30,211 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34293 connecting to ZooKeeper ensemble=127.0.0.1:58511 2024-11-25T10:32:30,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:342930x0, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:32:30,216 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34293-0x1014eaf2d820000 connected 2024-11-25T10:32:30,233 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:30,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:30,236 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:32:30,236 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d, hbase.cluster.distributed=false 2024-11-25T10:32:30,238 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:32:30,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34293 2024-11-25T10:32:30,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34293 2024-11-25T10:32:30,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34293 2024-11-25T10:32:30,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34293 2024-11-25T10:32:30,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34293 2024-11-25T10:32:30,258 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:32:30,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:32:30,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:32:30,258 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:32:30,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:32:30,258 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:32:30,258 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:32:30,258 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:32:30,259 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40685 2024-11-25T10:32:30,260 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40685 connecting to ZooKeeper ensemble=127.0.0.1:58511 2024-11-25T10:32:30,261 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:30,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:30,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406850x0, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:32:30,268 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40685-0x1014eaf2d820001 connected 2024-11-25T10:32:30,268 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:32:30,268 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:32:30,268 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:32:30,269 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T10:32:30,270 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:32:30,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40685 2024-11-25T10:32:30,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40685 2024-11-25T10:32:30,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40685 2024-11-25T10:32:30,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40685 2024-11-25T10:32:30,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40685 
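The NettyRpcServer binds above (master on 172.17.0.2:34293, region server on 172.17.0.2:40685) and the RecoverableZooKeeper sessions against ensemble 127.0.0.1:58511 are the endpoints a client later discovers through ZooKeeper. The sketch below is only a hedged illustration of that client path using the standard HBase client API; it is not code from this test, which would normally reach the cluster through the testing utility's own connection helpers.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ClientConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values copied from the log above: ZooKeeper ensemble 127.0.0.1, client port 58511.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 58511);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      // The client resolves the active master and region servers via ZooKeeper and then
      // talks to the NettyRpcServer ports (34293 / 40685) shown in the records above.
      System.out.println(meta.getName());
    }
  }
}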
2024-11-25T10:32:30,283 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2fd862922a0:34293 2024-11-25T10:32:30,283 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2fd862922a0,34293,1732530750208 2024-11-25T10:32:30,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:32:30,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:32:30,285 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e2fd862922a0,34293,1732530750208 2024-11-25T10:32:30,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T10:32:30,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,287 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:32:30,287 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2fd862922a0,34293,1732530750208 from backup master directory 2024-11-25T10:32:30,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2fd862922a0,34293,1732530750208 2024-11-25T10:32:30,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:32:30,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:32:30,289 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
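The ZKWatcher lines above repeatedly note "Set watcher on znode that does not yet exist" for paths such as /hbase/running and /hbase/master. The underlying ZooKeeper primitive is an exists() call with a watch, which registers the watch whether or not the node is present yet; the sketch below shows that idea with the plain ZooKeeper client against the ensemble from the log, and is illustrative only, since HBase wraps this in ZKWatcher/ZKUtil.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative watcher: just prints the events that ZKWatcher(609) logs above.
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58511", 30000, watcher);
    // exists() with watch=true registers the watch even when the znode is absent,
    // which is what "Set watcher on znode that does not yet exist" refers to.
    Stat stat = zk.exists("/hbase/master", true);
    System.out.println(stat == null ? "/hbase/master not created yet" : "/hbase/master present");
    zk.close();
  }
}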
2024-11-25T10:32:30,289 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2fd862922a0,34293,1732530750208 2024-11-25T10:32:30,292 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/hbase.id] with ID: d9e4d397-a29e-410b-942e-ba4e266be0e1 2024-11-25T10:32:30,292 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/.tmp/hbase.id 2024-11-25T10:32:30,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:32:30,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:32:30,300 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/.tmp/hbase.id]:[hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/hbase.id] 2024-11-25T10:32:30,310 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:30,310 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T10:32:30,311 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
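The FSUtils records above describe writing the cluster ID to a temporary file and then moving it to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/hbase.id. The general pattern is write-to-temp-then-rename, so readers never observe a half-written file. The sketch below shows that pattern with the plain Hadoop FileSystem API; the paths and the cluster ID d9e4d397-a29e-410b-942e-ba4e266be0e1 are copied from the log, while the class itself is invented and is not the FSUtils implementation.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(java.net.URI.create("hdfs://localhost:40299"), conf);
    Path tmp = new Path("/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/.tmp/hbase.id");
    Path target = new Path("/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("d9e4d397-a29e-410b-942e-ba4e266be0e1".getBytes(StandardCharsets.UTF_8));
    }
    // rename is atomic on HDFS, so other readers only ever see a complete cluster ID file
    fs.rename(tmp, target);
  }
}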
2024-11-25T10:32:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:32:30,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:32:30,323 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:32:30,323 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T10:32:30,324 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:32:30,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:32:30,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:32:30,333 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store 2024-11-25T10:32:30,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:32:30,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:32:30,339 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:30,339 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:32:30,339 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:32:30,339 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:32:30,339 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:32:30,339 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:32:30,339 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
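The MasterRegion(370) and HRegion(7590) records above print the full descriptor of the local 'master:store' table with its four families (info, proc, rs, state). Expressed with the public TableDescriptorBuilder API it would look roughly like the sketch below; the real descriptor is assembled internally by the master, so this only mirrors the attributes printed in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                  // VERSIONS => '3'
            .setInMemory(true)                                  // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                             // BLOCKSIZE => '8192 B (8KB)'
            .setBloomFilterType(BloomType.ROWCOL)               // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        // proc, rs and state use the defaults shown in the log: 1 version, 64KB blocks, ROW bloom
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}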
2024-11-25T10:32:30,339 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530750339Disabling compacts and flushes for region at 1732530750339Disabling writes for close at 1732530750339Writing region close event to WAL at 1732530750339Closed at 1732530750339 2024-11-25T10:32:30,340 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/.initializing 2024-11-25T10:32:30,340 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/WALs/e2fd862922a0,34293,1732530750208 2024-11-25T10:32:30,342 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C34293%2C1732530750208, suffix=, logDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/WALs/e2fd862922a0,34293,1732530750208, archiveDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/oldWALs, maxLogs=10 2024-11-25T10:32:30,343 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C34293%2C1732530750208.1732530750342 2024-11-25T10:32:30,347 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/WALs/e2fd862922a0,34293,1732530750208/e2fd862922a0%2C34293%2C1732530750208.1732530750342 2024-11-25T10:32:30,347 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40965:40965),(127.0.0.1/127.0.0.1:32779:32779)] 2024-11-25T10:32:30,348 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:32:30,348 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:30,348 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,348 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,352 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T10:32:30,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:30,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T10:32:30,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:32:30,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T10:32:30,355 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:32:30,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T10:32:30,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:32:30,357 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,357 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,358 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,359 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,359 DEBUG [master/e2fd862922a0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,359 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T10:32:30,360 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:32:30,365 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:32:30,365 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=823182, jitterRate=0.04673033952713013}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T10:32:30,366 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732530750348Initializing all the Stores at 1732530750349 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530750349Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530750351 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530750351Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530750351Cleaning up temporary data from old regions at 1732530750359 (+8 ms)Region opened successfully at 1732530750366 (+7 ms) 2024-11-25T10:32:30,366 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T10:32:30,368 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d669f97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:32:30,369 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T10:32:30,369 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T10:32:30,369 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T10:32:30,370 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T10:32:30,370 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T10:32:30,370 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T10:32:30,370 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T10:32:30,372 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T10:32:30,373 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T10:32:30,374 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T10:32:30,374 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T10:32:30,374 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T10:32:30,375 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T10:32:30,376 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T10:32:30,376 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T10:32:30,377 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T10:32:30,378 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T10:32:30,381 DEBUG 
[master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T10:32:30,382 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T10:32:30,383 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T10:32:30,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:32:30,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:32:30,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,385 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2fd862922a0,34293,1732530750208, sessionid=0x1014eaf2d820000, setting cluster-up flag (Was=false) 2024-11-25T10:32:30,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,393 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T10:32:30,394 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,34293,1732530750208 2024-11-25T10:32:30,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,402 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T10:32:30,403 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,34293,1732530750208 2024-11-25T10:32:30,404 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T10:32:30,405 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T10:32:30,405 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T10:32:30,406 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T10:32:30,406 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2fd862922a0,34293,1732530750208 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2fd862922a0:0, corePoolSize=10, maxPoolSize=10 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:32:30,407 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732530780408 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T10:32:30,408 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,409 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:32:30,409 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T10:32:30,409 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T10:32:30,409 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T10:32:30,409 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T10:32:30,409 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T10:32:30,409 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T10:32:30,410 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,410 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T10:32:30,410 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530750409,5,FailOnTimeoutGroup] 2024-11-25T10:32:30,411 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530750410,5,FailOnTimeoutGroup] 2024-11-25T10:32:30,411 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,411 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T10:32:30,411 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,411 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
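
The descriptor printed just above for hbase:meta (families info, ns, rep_barrier and table; ROW_INDEX_V1 block encoding; ROWCOL bloom filters; IN_MEMORY; 8 KB or 64 KB block sizes) is assembled internally by FSTableDescriptors. For orientation, the sketch below builds an equivalent column family with the public hbase-client builders; the table name "example_table" is a placeholder and nothing here is created by this test run.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Column family mirroring the 'info' family settings printed in the log:
        // VERSIONS=3, IN_MEMORY=true, DATA_BLOCK_ENCODING=ROW_INDEX_V1,
        // BLOOMFILTER=ROWCOL, BLOCKSIZE=8192.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8192)
            .build();

        // "example_table" is a placeholder name, not something created by this run.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }
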
2024-11-25T10:32:30,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:32:30,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:32:30,418 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T10:32:30,419 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d 2024-11-25T10:32:30,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:32:30,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:32:30,425 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:30,425 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:32:30,426 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:32:30,427 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:30,427 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:32:30,428 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:32:30,428 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:30,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:32:30,429 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:32:30,429 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:30,430 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:32:30,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:32:30,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:30,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:30,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:32:30,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740 2024-11-25T10:32:30,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740 2024-11-25T10:32:30,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:32:30,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:32:30,433 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
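
The CompactionConfiguration lines above are all printing stock defaults: minCompactSize 128 MB, maxCompactSize effectively unlimited (8.00 EB), 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), and a 7-day major compaction period (604800000 ms) with 0.5 jitter. The FlushLargeStoresPolicy line falls back to memstoreFlushSize divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, which for the four families of hbase:meta works out to the 16.0 M it reports. A sketch of the standard configuration keys behind the compaction numbers, set here only to their logged defaults for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values mirror the defaults echoed by CompactionConfiguration above.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE);     // maxCompactSize: 8.00 EB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period: 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter

        System.out.println("compaction ratio = " +
            conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }
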
2024-11-25T10:32:30,434 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:32:30,435 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:32:30,436 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=754912, jitterRate=-0.04007992148399353}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:32:30,436 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732530750425Initializing all the Stores at 1732530750425Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530750425Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530750425Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530750425Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530750425Cleaning up temporary data from old regions at 1732530750433 (+8 ms)Region opened successfully at 1732530750436 (+3 ms) 2024-11-25T10:32:30,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:32:30,437 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:32:30,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:32:30,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:32:30,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:32:30,437 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:32:30,437 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530750436Disabling compacts and flushes for region at 1732530750436Disabling writes for close at 1732530750437 (+1 ms)Writing region close 
event to WAL at 1732530750437Closed at 1732530750437 2024-11-25T10:32:30,438 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:32:30,438 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T10:32:30,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T10:32:30,439 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:32:30,440 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T10:32:30,473 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(746): ClusterId : d9e4d397-a29e-410b-942e-ba4e266be0e1 2024-11-25T10:32:30,473 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:32:30,475 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:32:30,475 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:32:30,477 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:32:30,477 DEBUG [RS:0;e2fd862922a0:40685 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29fb3c79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:32:30,489 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2fd862922a0:40685 2024-11-25T10:32:30,489 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:32:30,489 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:32:30,489 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T10:32:30,490 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,34293,1732530750208 with port=40685, startcode=1732530750258 2024-11-25T10:32:30,490 DEBUG [RS:0;e2fd862922a0:40685 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:32:30,492 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38877, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:32:30,492 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34293 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,40685,1732530750258 2024-11-25T10:32:30,492 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34293 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:30,494 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d 2024-11-25T10:32:30,494 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40299 2024-11-25T10:32:30,494 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:32:30,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:32:30,496 DEBUG [RS:0;e2fd862922a0:40685 {}] zookeeper.ZKUtil(111): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,40685,1732530750258 2024-11-25T10:32:30,496 WARN [RS:0;e2fd862922a0:40685 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:32:30,496 INFO [RS:0;e2fd862922a0:40685 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:32:30,496 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258 2024-11-25T10:32:30,496 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,40685,1732530750258] 2024-11-25T10:32:30,499 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:32:30,500 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:32:30,501 INFO [RS:0;e2fd862922a0:40685 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:32:30,501 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
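
The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Those are derived values rather than explicit settings: the limit defaults to a fraction of the region server heap (hbase.regionserver.global.memstore.size, default 0.4) and the low-water mark to 95% of the limit (hbase.regionserver.global.memstore.size.lower.limit, default 0.95), and 0.95 × 880 M = 836 M. The heap size itself is not printed in this excerpt, so the ~2.2 GB figure in the sketch below is inferred from 880 M / 0.4.

    public class MemstoreSizingSketch {
      public static void main(String[] args) {
        // Heap size is an assumption for illustration; the log only prints the derived limits.
        long heapBytes = 2200L * 1024 * 1024;   // ~2.2 GB test JVM heap (inferred)
        double globalFraction = 0.4;            // hbase.regionserver.global.memstore.size
        double lowerLimitFraction = 0.95;       // hbase.regionserver.global.memstore.size.lower.limit

        long globalLimit = (long) (heapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerLimitFraction);

        System.out.printf("globalMemStoreLimit=%d M, lowMark=%d M%n",
            globalLimit / (1024 * 1024), lowMark / (1024 * 1024));
        // Prints 880 M and 836 M, matching the MemStoreFlusher line above.
      }
    }
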
2024-11-25T10:32:30,502 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:32:30,502 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:32:30,502 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:32:30,503 DEBUG [RS:0;e2fd862922a0:40685 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:32:30,503 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
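
Many of the INFO lines above are ChoreService registrations (CompactionChecker every 1000 ms, CompactedHFilesCleaner every 120000 ms, and so on). As a rough sketch of the same mechanism using the public ScheduledChore and ChoreService classes, the example below schedules a made-up chore; the chore name, period, and no-op Stoppable are illustrative only.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable; real services pass the server instance here.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // Hypothetical chore with a 1000 ms period, in the style of the entries above.
        ScheduledChore example = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };

        ChoreService choreService = new ChoreService("example");
        choreService.scheduleChore(example); // source of the "Chore ScheduledChore name=... is enabled." lines
        Thread.sleep(3000);
        choreService.shutdown();
      }
    }
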
2024-11-25T10:32:30,503 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,504 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,504 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,504 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,504 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,40685,1732530750258-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:32:30,518 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:32:30,518 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,40685,1732530750258-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,518 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,518 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.Replication(171): e2fd862922a0,40685,1732530750258 started 2024-11-25T10:32:30,532 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:30,532 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,40685,1732530750258, RpcServer on e2fd862922a0/172.17.0.2:40685, sessionid=0x1014eaf2d820001 2024-11-25T10:32:30,532 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,40685,1732530750258 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,40685,1732530750258' 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2fd862922a0,40685,1732530750258 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,40685,1732530750258' 2024-11-25T10:32:30,533 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:32:30,534 DEBUG 
[RS:0;e2fd862922a0:40685 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:32:30,534 DEBUG [RS:0;e2fd862922a0:40685 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:32:30,534 INFO [RS:0;e2fd862922a0:40685 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:32:30,534 INFO [RS:0;e2fd862922a0:40685 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T10:32:30,590 WARN [e2fd862922a0:34293 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T10:32:30,636 INFO [RS:0;e2fd862922a0:40685 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C40685%2C1732530750258, suffix=, logDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258, archiveDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/oldWALs, maxLogs=32 2024-11-25T10:32:30,637 INFO [RS:0;e2fd862922a0:40685 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C40685%2C1732530750258.1732530750637 2024-11-25T10:32:30,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:30,642 INFO [RS:0;e2fd862922a0:40685 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530750637 2024-11-25T10:32:30,643 DEBUG [RS:0;e2fd862922a0:40685 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32779:32779),(127.0.0.1/127.0.0.1:40965:40965)] 2024-11-25T10:32:30,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:30,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:30,840 DEBUG [e2fd862922a0:34293 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T10:32:30,841 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:30,842 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,40685,1732530750258, state=OPENING 2024-11-25T10:32:30,845 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T10:32:30,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:32:30,847 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:32:30,847 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:32:30,847 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:32:30,847 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,40685,1732530750258}] 2024-11-25T10:32:31,000 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T10:32:31,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35837, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T10:32:31,005 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T10:32:31,005 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:32:31,006 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C40685%2C1732530750258.meta, suffix=.meta, logDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258, archiveDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/oldWALs, maxLogs=32 2024-11-25T10:32:31,007 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C40685%2C1732530750258.meta.1732530751007.meta 2024-11-25T10:32:31,012 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.meta.1732530751007.meta 2024-11-25T10:32:31,016 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40965:40965),(127.0.0.1/127.0.0.1:32779:32779)] 2024-11-25T10:32:31,024 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:32:31,025 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T10:32:31,025 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T10:32:31,025 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
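The repeated `util.RecoverLeaseFSUtils(258): Failed invocation ... Caused by: java.io.IOException: Filesystem closed` warnings above come from lease recovery on WALs of an earlier mini-cluster whose DFS client has already been shut down. The stack trace shows the call reaching `DistributedFileSystem.isFileClosed` through `Method.invoke`, so the real `IOException` surfaces wrapped in an `InvocationTargetException` and is only visible in the `Caused by` section. A minimal, self-contained sketch of that wrapping behaviour (the `ClosedFs` stand-in is hypothetical, not HBase code):

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCallDemo {
    // Hypothetical stand-in for a filesystem whose client was already closed.
    static class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        ClosedFs fs = new ClosedFs();
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(fs, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // Reflection wraps the real failure; the log's "Caused by" is the cause here.
            System.out.println("wrapper: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}
```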
2024-11-25T10:32:31,025 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T10:32:31,025 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:31,025 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T10:32:31,025 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T10:32:31,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:32:31,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:32:31,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:31,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:31,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:32:31,028 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:32:31,028 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:31,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:31,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:32:31,029 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:32:31,029 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:31,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:32:31,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:32:31,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:32:31,030 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:31,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
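The `CompactionConfiguration` lines above (ratio 1.200000, minFilesToCompact:3, maxFilesToCompact:10, minCompactSize:128 MB) describe size-based minor-compaction selection for each column family. As a rough illustration of the ratio test only (a simplification for this log, not HBase's actual ExploringCompactionPolicy), a store file is a reasonable candidate when it is no more than `ratio` times the combined size of the other files it would be compacted with:

```java
import java.util.List;

public class RatioRuleSketch {
    // Simplified, hypothetical helper: a file passes the size-ratio test when it is
    // not "too large" relative to the other candidate files.
    static boolean withinRatio(long fileSize, List<Long> otherSizes, double ratio) {
        long sumOthers = otherSizes.stream().mapToLong(Long::longValue).sum();
        return fileSize <= sumOthers * ratio;
    }

    public static void main(String[] args) {
        double ratio = 1.2; // "ratio 1.200000" from the log above
        List<Long> others = List.of(10L * 1024, 12L * 1024, 9L * 1024);
        System.out.println(withinRatio(40L * 1024, others, ratio)); // false: too big vs. peers
        System.out.println(withinRatio(20L * 1024, others, ratio)); // true: within 1.2x of peers
    }
}
```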
2024-11-25T10:32:31,031 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:32:31,031 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740 2024-11-25T10:32:31,032 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740 2024-11-25T10:32:31,033 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:32:31,033 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:32:31,034 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:32:31,035 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:32:31,035 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=793480, jitterRate=0.008962064981460571}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:32:31,035 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T10:32:31,036 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732530751025Writing region info on filesystem at 1732530751025Initializing all the Stores at 1732530751026 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530751026Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530751026Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530751026Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530751026Cleaning up temporary data from old regions at 1732530751033 (+7 ms)Running coprocessor post-open hooks at 1732530751035 (+2 ms)Region opened successfully at 1732530751036 (+1 ms) 2024-11-25T10:32:31,037 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732530750999 2024-11-25T10:32:31,040 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T10:32:31,040 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T10:32:31,041 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:31,041 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,40685,1732530750258, state=OPEN 2024-11-25T10:32:31,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:32:31,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:32:31,045 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:31,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:32:31,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:32:31,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T10:32:31,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,40685,1732530750258 in 198 msec 2024-11-25T10:32:31,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T10:32:31,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 610 msec 2024-11-25T10:32:31,051 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:32:31,051 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T10:32:31,052 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:32:31,052 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,40685,1732530750258, seqNum=-1] 2024-11-25T10:32:31,052 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:32:31,054 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42079, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:32:31,058 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 653 msec 2024-11-25T10:32:31,058 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732530751058, completionTime=-1 2024-11-25T10:32:31,058 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T10:32:31,058 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T10:32:31,060 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T10:32:31,060 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732530811060 2024-11-25T10:32:31,060 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732530871060 2024-11-25T10:32:31,060 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T10:32:31,061 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,34293,1732530750208-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:31,061 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,34293,1732530750208-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:31,061 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,34293,1732530750208-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:31,061 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2fd862922a0:34293, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:32:31,061 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:31,061 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T10:32:31,062 DEBUG [master/e2fd862922a0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T10:32:31,064 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.775sec 2024-11-25T10:32:31,065 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T10:32:31,065 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T10:32:31,065 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T10:32:31,065 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T10:32:31,065 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T10:32:31,065 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,34293,1732530750208-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:32:31,065 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,34293,1732530750208-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T10:32:31,067 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T10:32:31,067 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T10:32:31,067 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,34293,1732530750208-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
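Each `ChoreService ... ScheduledChore name=..., period=..., unit=... is enabled.` line above registers a periodic background task on the master. Conceptually this is fixed-rate scheduling; a minimal stand-alone sketch using only the JDK (an illustration, not HBase's ChoreService API):

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

        // Comparable to "ScheduledChore name=...-BalancerChore, period=300000,
        // unit=MILLISECONDS is enabled.": a named task that fires at a fixed period.
        Runnable balancerChore = () -> System.out.println("running balancer chore");
        pool.scheduleAtFixedRate(balancerChore, 0, 300_000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(1); // let the first run fire, then stop the demo
        pool.shutdownNow();
    }
}
```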
2024-11-25T10:32:31,073 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@268a31fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:32:31,073 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2fd862922a0,34293,-1 for getting cluster id 2024-11-25T10:32:31,073 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T10:32:31,075 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd9e4d397-a29e-410b-942e-ba4e266be0e1' 2024-11-25T10:32:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T10:32:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d9e4d397-a29e-410b-942e-ba4e266be0e1" 2024-11-25T10:32:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61349d58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:32:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2fd862922a0,34293,-1] 2024-11-25T10:32:31,075 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T10:32:31,076 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:32:31,077 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52054, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T10:32:31,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30ce08b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:32:31,078 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:32:31,078 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,40685,1732530750258, seqNum=-1] 2024-11-25T10:32:31,079 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:32:31,079 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57070, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:32:31,081 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2fd862922a0,34293,1732530750208 2024-11-25T10:32:31,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:32:31,083 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T10:32:31,083 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-25T10:32:31,084 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is e2fd862922a0,34293,1732530750208 2024-11-25T10:32:31,084 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2319c43c 2024-11-25T10:32:31,084 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-25T10:32:31,085 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52070, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-25T10:32:31,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34293 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-25T10:32:31,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34293 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-25T10:32:31,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34293 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:32:31,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34293 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-25T10:32:31,088 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-25T10:32:31,088 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:31,088 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34293 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-25T10:32:31,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34293 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-25T10:32:31,089 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-25T10:32:31,095 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741835_1011 (size=381) 2024-11-25T10:32:31,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741835_1011 (size=381) 2024-11-25T10:32:31,096 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => db362bd9276f0f7b0aea2f60885c6e5f, NAME => 'TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d 2024-11-25T10:32:31,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741836_1012 (size=64) 2024-11-25T10:32:31,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741836_1012 (size=64) 2024-11-25T10:32:31,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:31,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing db362bd9276f0f7b0aea2f60885c6e5f, disabling compactions & flushes 2024-11-25T10:32:31,104 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 2024-11-25T10:32:31,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 2024-11-25T10:32:31,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. after waiting 0 ms 2024-11-25T10:32:31,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 2024-11-25T10:32:31,104 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 
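The `TableDescriptorChecker` warnings further up (MAX_FILESIZE=786432, MEMSTORE_FLUSHSIZE=8192) indicate deliberately tiny limits, presumably chosen by the test to force frequent flushes, rolls and splits. Assuming the HBase 2/3 client API, a table descriptor with those values could be built roughly as below; this is a sketch only, since the test's actual setup code is not shown in this log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallLimitsTableSketch {
    public static void main(String[] args) {
        // Values taken from the TableDescriptorChecker warnings in the log above.
        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                .setMaxFileSize(786432)       // small enough to trip the MAX_FILESIZE warning
                .setMemStoreFlushSize(8192)   // small enough to trip the MEMSTORE_FLUSHSIZE warning
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .build();
        System.out.println(td);
        // With a live cluster this descriptor would be passed to Admin#createTable(td).
    }
}
```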
2024-11-25T10:32:31,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for db362bd9276f0f7b0aea2f60885c6e5f: Waiting for close lock at 1732530751104Disabling compacts and flushes for region at 1732530751104Disabling writes for close at 1732530751104Writing region close event to WAL at 1732530751104Closed at 1732530751104 2024-11-25T10:32:31,105 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-25T10:32:31,105 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732530751105"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732530751105"}]},"ts":"1732530751105"} 2024-11-25T10:32:31,107 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-25T10:32:31,108 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-25T10:32:31,109 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530751108"}]},"ts":"1732530751108"} 2024-11-25T10:32:31,111 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-25T10:32:31,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, ASSIGN}] 2024-11-25T10:32:31,112 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, ASSIGN 2024-11-25T10:32:31,113 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, ASSIGN; state=OFFLINE, location=e2fd862922a0,40685,1732530750258; forceNewPlan=false, retain=false 2024-11-25T10:32:31,264 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=db362bd9276f0f7b0aea2f60885c6e5f, regionState=OPENING, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:31,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, ASSIGN because future has completed 2024-11-25T10:32:31,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure db362bd9276f0f7b0aea2f60885c6e5f, 
server=e2fd862922a0,40685,1732530750258}] 2024-11-25T10:32:31,423 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 2024-11-25T10:32:31,423 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => db362bd9276f0f7b0aea2f60885c6e5f, NAME => 'TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:32:31,423 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,423 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:31,423 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,424 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,425 INFO [StoreOpener-db362bd9276f0f7b0aea2f60885c6e5f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,426 INFO [StoreOpener-db362bd9276f0f7b0aea2f60885c6e5f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region db362bd9276f0f7b0aea2f60885c6e5f columnFamilyName info 2024-11-25T10:32:31,426 DEBUG [StoreOpener-db362bd9276f0f7b0aea2f60885c6e5f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:31,426 INFO [StoreOpener-db362bd9276f0f7b0aea2f60885c6e5f-1 {}] regionserver.HStore(327): Store=db362bd9276f0f7b0aea2f60885c6e5f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:32:31,427 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,427 DEBUG 
[RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,427 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,428 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,428 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,429 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,431 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:32:31,431 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened db362bd9276f0f7b0aea2f60885c6e5f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861543, jitterRate=0.09550929069519043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T10:32:31,431 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:31,432 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for db362bd9276f0f7b0aea2f60885c6e5f: Running coprocessor pre-open hook at 1732530751424Writing region info on filesystem at 1732530751424Initializing all the Stores at 1732530751424Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530751424Cleaning up temporary data from old regions at 1732530751428 (+4 ms)Running coprocessor post-open hooks at 1732530751431 (+3 ms)Region opened successfully at 1732530751432 (+1 ms) 2024-11-25T10:32:31,433 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., pid=6, masterSystemTime=1732530751419 2024-11-25T10:32:31,435 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.
2024-11-25T10:32:31,435 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.
2024-11-25T10:32:31,436 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=db362bd9276f0f7b0aea2f60885c6e5f, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,40685,1732530750258
2024-11-25T10:32:31,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258 because future has completed
2024-11-25T10:32:31,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-25T10:32:31,441 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258 in 172 msec
2024-11-25T10:32:31,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-25T10:32:31,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, ASSIGN in 330 msec
2024-11-25T10:32:31,444 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-25T10:32:31,444 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732530751444"}]},"ts":"1732530751444"}
2024-11-25T10:32:31,446 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta
2024-11-25T10:32:31,447 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-11-25T10:32:31,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 361 msec
2024-11-25T10:32:31,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-25T10:32:31,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-25T10:32:31,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-25T10:32:32,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-25T10:32:32,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-25T10:32:32,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-25T10:32:33,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-25T10:32:33,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:33,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:34,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,057 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,058 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,071 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,580 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:32:34,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:34,610 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-25T10:32:34,610 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-25T10:32:34,611 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-25T10:32:34,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:34,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:34,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:35,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:35,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:35,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:36,499 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T10:32:36,499 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-25T10:32:36,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:36,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:36,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:37,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:37,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:37,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:38,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:38,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:38,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:39,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:39,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:39,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:40,113 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:32:40,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,114 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,115 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,136 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,137 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:40,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:40,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:40,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T10:32:41,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34293 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-25T10:32:41,122 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-11-25T10:32:41,122 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100
2024-11-25T10:32:41,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-11-25T10:32:41,125 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.
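The recurring Close-WAL-Writer-0 warnings throughout this stretch of the log all come from the same place: the WAL close path keeps probing whether the old WAL files on hdfs://localhost:46735 are closed, but it does so through a DFSClient that has already been shut down, so every probe fails with java.io.IOException: Filesystem closed. The Method.invoke frames show the probe is made reflectively, and the timestamps show it being retried roughly once per second for each of the three WAL paths. Below is a minimal, illustrative sketch of such a reflective isFileClosed probe; the class and method names are made up for illustration, and this is not the actual org.apache.hadoop.hbase.util.RecoverLeaseFSUtils code, only a simplified approximation of the mechanism the stack traces suggest.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, for illustration only.
public final class IsFileClosedProbe {

  /**
   * Asks the filesystem whether the file at the given path is closed, using reflection so the
   * code still works against FileSystem implementations that lack isFileClosed(Path).
   * Returns false if the probe is unsupported or fails.
   */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // DistributedFileSystem exposes a public isFileClosed(Path) method on Hadoop 2+.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      // This FileSystem implementation does not support the probe.
      return false;
    } catch (IllegalAccessException | InvocationTargetException e) {
      // Once the underlying DFSClient is closed, the cause is IOException("Filesystem closed"),
      // which is what the "Failed invocation" warnings in this log keep reporting.
      return false;
    }
  }

  private IsFileClosedProbe() {}
}

Under this sketch, a caller that loops until the probe returns true would retry indefinitely once the filesystem has been closed underneath it, which matches the once-per-second repetition of the warnings above and below.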
2024-11-25T10:32:41,127 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2]
2024-11-25T10:32:41,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on db362bd9276f0f7b0aea2f60885c6e5f
2024-11-25T10:32:41,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing db362bd9276f0f7b0aea2f60885c6e5f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-25T10:32:41,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/f5f00e9ce72d42739d0b1daf8c4d3646 is 1080, key is row0001/info:/1732530761129/Put/seqid=0
2024-11-25T10:32:41,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741837_1013 (size=12509)
2024-11-25T10:32:41,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741837_1013 (size=12509)
2024-11-25T10:32:41,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/f5f00e9ce72d42739d0b1daf8c4d3646
2024-11-25T10:32:41,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-25T10:32:41,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/f5f00e9ce72d42739d0b1daf8c4d3646 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/f5f00e9ce72d42739d0b1daf8c4d3646
2024-11-25T10:32:41,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57070 deadline: 1732530771173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258
2024-11-25T10:32:41,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/f5f00e9ce72d42739d0b1daf8c4d3646, entries=7, sequenceid=11, filesize=12.2 K
2024-11-25T10:32:41,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for db362bd9276f0f7b0aea2f60885c6e5f in 43ms, sequenceid=11, compaction requested=false
2024-11-25T10:32:41,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for db362bd9276f0f7b0aea2f60885c6e5f:
2024-11-25T10:32:41,208 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T10:32:41,209 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T10:32:41,209 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2 because the exception is null or not the one we care about 2024-11-25T10:32:41,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:41,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:41,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:42,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:42,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:42,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:43,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:43,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:43,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:44,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:44,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:44,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:45,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:45,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:45,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:46,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:46,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:46,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:47,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:47,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:47,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:48,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:48,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:48,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:49,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:49,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:49,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:50,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:50,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:50,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:51,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:51,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing db362bd9276f0f7b0aea2f60885c6e5f 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-25T10:32:51,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/37fd76eb02984312ba65c42b18dcfe7c is 1080, key is row0008/info:/1732530761140/Put/seqid=0 2024-11-25T10:32:51,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741838_1014 (size=29761) 2024-11-25T10:32:51,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741838_1014 (size=29761) 2024-11-25T10:32:51,283 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/37fd76eb02984312ba65c42b18dcfe7c 2024-11-25T10:32:51,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/37fd76eb02984312ba65c42b18dcfe7c as 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c 2024-11-25T10:32:51,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c, entries=23, sequenceid=37, filesize=29.1 K 2024-11-25T10:32:51,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for db362bd9276f0f7b0aea2f60885c6e5f in 22ms, sequenceid=37, compaction requested=false 2024-11-25T10:32:51,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for db362bd9276f0f7b0aea2f60885c6e5f: 2024-11-25T10:32:51,294 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-25T10:32:51,294 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:51,294 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c because midkey is the same as first or last row 2024-11-25T10:32:51,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:51,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:51,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:52,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:52,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:52,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:53,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing db362bd9276f0f7b0aea2f60885c6e5f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:32:53,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/80a5582bc91643318a7551c5e8f2790a is 1080, key is row0031/info:/1732530771274/Put/seqid=0 2024-11-25T10:32:53,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741839_1015 (size=12509) 2024-11-25T10:32:53,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741839_1015 (size=12509) 2024-11-25T10:32:53,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/80a5582bc91643318a7551c5e8f2790a 2024-11-25T10:32:53,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/80a5582bc91643318a7551c5e8f2790a as 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/80a5582bc91643318a7551c5e8f2790a 2024-11-25T10:32:53,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/80a5582bc91643318a7551c5e8f2790a, entries=7, sequenceid=47, filesize=12.2 K 2024-11-25T10:32:53,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for db362bd9276f0f7b0aea2f60885c6e5f in 22ms, sequenceid=47, compaction requested=true 2024-11-25T10:32:53,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for db362bd9276f0f7b0aea2f60885c6e5f: 2024-11-25T10:32:53,307 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,307 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,307 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c because midkey is the same as first or last row 2024-11-25T10:32:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:53,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db362bd9276f0f7b0aea2f60885c6e5f:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:32:53,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:53,308 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing db362bd9276f0f7b0aea2f60885c6e5f 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T10:32:53,308 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:32:53,309 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:32:53,310 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): db362bd9276f0f7b0aea2f60885c6e5f/info is initiating minor compaction (all files) 2024-11-25T10:32:53,310 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of db362bd9276f0f7b0aea2f60885c6e5f/info in TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 
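The flush records above end with the same two-step split probe each time: the policy first reports the region as big enough (for example sumSize=53.5 K against sizeToCheck=16.0 K), yet StoreUtils still declines to split because the candidate midkey equals the file's first or last row. The following stand-alone Java sketch only models that decision shape; the class, record, and key values are illustrative and this is not the actual ConstantSizeRegionSplitPolicy/StoreUtils code.

```java
// Minimal, self-contained sketch (not HBase's classes): models the two-step split
// decision printed in the surrounding log lines. All names here are illustrative.
final class SplitCheckSketch {

    /** Result of a split probe: either split, or a reason why no split happens. */
    record SplitDecision(boolean shouldSplit, String reason) {}

    /**
     * Step 1: size trigger - the store counts as "big enough" once the summed store
     * file size exceeds the configured check size (the log prints sumSize vs. sizeToCheck).
     * Step 2: midkey guard - even when the size trigger fires, no split happens if the
     * candidate split row equals the file's first or last row, since one daughter region
     * would end up with an empty key range.
     */
    static SplitDecision probe(long sumStoreFileBytes, long sizeToCheckBytes,
                               byte[] midKey, byte[] firstKey, byte[] lastKey) {
        if (sumStoreFileBytes <= sizeToCheckBytes) {
            return new SplitDecision(false, "region not big enough");
        }
        if (java.util.Arrays.equals(midKey, firstKey) || java.util.Arrays.equals(midKey, lastKey)) {
            return new SplitDecision(false, "midkey is the same as first or last row");
        }
        return new SplitDecision(true, "split at midkey");
    }

    public static void main(String[] args) {
        // Sizes echo the log (41.3 K of store files vs. a 16.0 K check size); the row
        // keys are made up, with the midkey equal to the first row, so no split results.
        SplitDecision d = probe(42_291, 16_384,
                "rowA".getBytes(), "rowA".getBytes(), "rowZ".getBytes());
        System.out.println(d); // SplitDecision[shouldSplit=false, reason=midkey is the same as first or last row]
    }
}
```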
2024-11-25T10:32:53,310 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/f5f00e9ce72d42739d0b1daf8c4d3646, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/80a5582bc91643318a7551c5e8f2790a] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp, totalSize=53.5 K 2024-11-25T10:32:53,310 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting f5f00e9ce72d42739d0b1daf8c4d3646, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732530761129 2024-11-25T10:32:53,311 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 37fd76eb02984312ba65c42b18dcfe7c, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732530761140 2024-11-25T10:32:53,311 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 80a5582bc91643318a7551c5e8f2790a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732530771274 2024-11-25T10:32:53,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/82c132e0cc424341aeea73706704065a is 1080, key is row0038/info:/1732530773286/Put/seqid=0 2024-11-25T10:32:53,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741840_1016 (size=20064) 2024-11-25T10:32:53,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741840_1016 (size=20064) 2024-11-25T10:32:53,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/82c132e0cc424341aeea73706704065a 2024-11-25T10:32:53,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/82c132e0cc424341aeea73706704065a as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/82c132e0cc424341aeea73706704065a 2024-11-25T10:32:53,326 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db362bd9276f0f7b0aea2f60885c6e5f#info#compaction#58 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 
0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:32:53,327 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/99c84461dfeb40ffa32d4034097075d8 is 1080, key is row0001/info:/1732530761129/Put/seqid=0 2024-11-25T10:32:53,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/82c132e0cc424341aeea73706704065a, entries=14, sequenceid=64, filesize=19.6 K 2024-11-25T10:32:53,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for db362bd9276f0f7b0aea2f60885c6e5f in 24ms, sequenceid=64, compaction requested=false 2024-11-25T10:32:53,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for db362bd9276f0f7b0aea2f60885c6e5f: 2024-11-25T10:32:53,332 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,332 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,332 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c because midkey is the same as first or last row 2024-11-25T10:32:53,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741841_1017 (size=44978) 2024-11-25T10:32:53,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:53,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741841_1017 (size=44978) 2024-11-25T10:32:53,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing db362bd9276f0f7b0aea2f60885c6e5f 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-25T10:32:53,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/aaedc8168dd24adaa9ae97af9a4bb93e is 1080, key is row0052/info:/1732530773309/Put/seqid=0 2024-11-25T10:32:53,339 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/99c84461dfeb40ffa32d4034097075d8 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8 2024-11-25T10:32:53,341 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741842_1018 (size=18987) 2024-11-25T10:32:53,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741842_1018 (size=18987) 2024-11-25T10:32:53,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/aaedc8168dd24adaa9ae97af9a4bb93e 2024-11-25T10:32:53,345 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in db362bd9276f0f7b0aea2f60885c6e5f/info of db362bd9276f0f7b0aea2f60885c6e5f into 99c84461dfeb40ffa32d4034097075d8(size=43.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:32:53,345 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for db362bd9276f0f7b0aea2f60885c6e5f: 2024-11-25T10:32:53,345 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., storeName=db362bd9276f0f7b0aea2f60885c6e5f/info, priority=13, startTime=1732530773307; duration=0sec 2024-11-25T10:32:53,345 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,345 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8 because midkey is the same as first or last row 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8 because midkey is the same as first or last row 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8 because midkey is the same as first or last row 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:53,346 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db362bd9276f0f7b0aea2f60885c6e5f:info 2024-11-25T10:32:53,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/aaedc8168dd24adaa9ae97af9a4bb93e as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/aaedc8168dd24adaa9ae97af9a4bb93e 2024-11-25T10:32:53,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/aaedc8168dd24adaa9ae97af9a4bb93e, entries=13, sequenceid=80, filesize=18.5 K 2024-11-25T10:32:53,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=0 B/0 for db362bd9276f0f7b0aea2f60885c6e5f in 19ms, sequenceid=80, compaction requested=true 2024-11-25T10:32:53,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for db362bd9276f0f7b0aea2f60885c6e5f: 2024-11-25T10:32:53,352 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.1 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,352 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,352 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8 because midkey is the same as first or last row 2024-11-25T10:32:53,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store db362bd9276f0f7b0aea2f60885c6e5f:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:32:53,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:53,353 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:32:53,353 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84029 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:32:53,353 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 
db362bd9276f0f7b0aea2f60885c6e5f/info is initiating minor compaction (all files) 2024-11-25T10:32:53,354 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of db362bd9276f0f7b0aea2f60885c6e5f/info in TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 2024-11-25T10:32:53,354 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/82c132e0cc424341aeea73706704065a, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/aaedc8168dd24adaa9ae97af9a4bb93e] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp, totalSize=82.1 K 2024-11-25T10:32:53,354 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 99c84461dfeb40ffa32d4034097075d8, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732530761129 2024-11-25T10:32:53,354 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 82c132e0cc424341aeea73706704065a, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1732530773286 2024-11-25T10:32:53,355 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting aaedc8168dd24adaa9ae97af9a4bb93e, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732530773309 2024-11-25T10:32:53,364 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): db362bd9276f0f7b0aea2f60885c6e5f#info#compaction#60 average throughput is 32.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:32:53,365 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/d5e744653ad34d6eaedff838c54889ba is 1080, key is row0001/info:/1732530761129/Put/seqid=0 2024-11-25T10:32:53,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741843_1019 (size=74301) 2024-11-25T10:32:53,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741843_1019 (size=74301) 2024-11-25T10:32:53,373 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/.tmp/info/d5e744653ad34d6eaedff838c54889ba as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba 2024-11-25T10:32:53,379 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in db362bd9276f0f7b0aea2f60885c6e5f/info of db362bd9276f0f7b0aea2f60885c6e5f into d5e744653ad34d6eaedff838c54889ba(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:32:53,379 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for db362bd9276f0f7b0aea2f60885c6e5f: 2024-11-25T10:32:53,379 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., storeName=db362bd9276f0f7b0aea2f60885c6e5f/info, priority=13, startTime=1732530773352; duration=0sec 2024-11-25T10:32:53,379 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,379 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,379 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,379 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,379 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-25T10:32:53,379 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-25T10:32:53,380 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:53,380 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:53,380 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: db362bd9276f0f7b0aea2f60885c6e5f:info 2024-11-25T10:32:53,381 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34293 {}] assignment.AssignmentManager(1363): Split request from e2fd862922a0,40685,1732530750258, parent={ENCODED => db362bd9276f0f7b0aea2f60885c6e5f, NAME => 'TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-25T10:32:53,386 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34293 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:53,390 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34293 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db362bd9276f0f7b0aea2f60885c6e5f, daughterA=40d83bebc06ac5292c7cdc20116583fd, daughterB=156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,391 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db362bd9276f0f7b0aea2f60885c6e5f, daughterA=40d83bebc06ac5292c7cdc20116583fd, daughterB=156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,391 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db362bd9276f0f7b0aea2f60885c6e5f, daughterA=40d83bebc06ac5292c7cdc20116583fd, daughterB=156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,391 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db362bd9276f0f7b0aea2f60885c6e5f, daughterA=40d83bebc06ac5292c7cdc20116583fd, daughterB=156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,398 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, UNASSIGN}] 2024-11-25T10:32:53,399 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, UNASSIGN 2024-11-25T10:32:53,400 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=db362bd9276f0f7b0aea2f60885c6e5f, regionState=CLOSING, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:53,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, UNASSIGN because future has completed 2024-11-25T10:32:53,403 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-25T10:32:53,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258}] 2024-11-25T10:32:53,559 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:53,560 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-25T10:32:53,560 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing db362bd9276f0f7b0aea2f60885c6e5f, disabling compactions & flushes 2024-11-25T10:32:53,560 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 2024-11-25T10:32:53,560 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 2024-11-25T10:32:53,560 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. after waiting 0 ms 2024-11-25T10:32:53,560 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 
2024-11-25T10:32:53,561 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/f5f00e9ce72d42739d0b1daf8c4d3646, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/80a5582bc91643318a7551c5e8f2790a, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/82c132e0cc424341aeea73706704065a, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/aaedc8168dd24adaa9ae97af9a4bb93e] to archive 2024-11-25T10:32:53,562 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T10:32:53,564 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/f5f00e9ce72d42739d0b1daf8c4d3646 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/f5f00e9ce72d42739d0b1daf8c4d3646 2024-11-25T10:32:53,565 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/37fd76eb02984312ba65c42b18dcfe7c 2024-11-25T10:32:53,566 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/99c84461dfeb40ffa32d4034097075d8 2024-11-25T10:32:53,567 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/80a5582bc91643318a7551c5e8f2790a to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/80a5582bc91643318a7551c5e8f2790a 2024-11-25T10:32:53,569 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/82c132e0cc424341aeea73706704065a to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/82c132e0cc424341aeea73706704065a 2024-11-25T10:32:53,570 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/aaedc8168dd24adaa9ae97af9a4bb93e to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/aaedc8168dd24adaa9ae97af9a4bb93e 2024-11-25T10:32:53,575 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1 2024-11-25T10:32:53,576 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 
2024-11-25T10:32:53,576 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for db362bd9276f0f7b0aea2f60885c6e5f: Waiting for close lock at 1732530773560Running coprocessor pre-close hooks at 1732530773560Disabling compacts and flushes for region at 1732530773560Disabling writes for close at 1732530773560Writing region close event to WAL at 1732530773572 (+12 ms)Running coprocessor post-close hooks at 1732530773575 (+3 ms)Closed at 1732530773576 (+1 ms) 2024-11-25T10:32:53,578 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:53,578 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=db362bd9276f0f7b0aea2f60885c6e5f, regionState=CLOSED 2024-11-25T10:32:53,580 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258 because future has completed 2024-11-25T10:32:53,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-25T10:32:53,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure db362bd9276f0f7b0aea2f60885c6e5f, server=e2fd862922a0,40685,1732530750258 in 178 msec 2024-11-25T10:32:53,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-25T10:32:53,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=db362bd9276f0f7b0aea2f60885c6e5f, UNASSIGN in 185 msec 2024-11-25T10:32:53,592 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:53,595 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=db362bd9276f0f7b0aea2f60885c6e5f, threads=1 2024-11-25T10:32:53,596 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba for region: db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741844_1020 (size=27) 2024-11-25T10:32:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741844_1020 (size=27) 2024-11-25T10:32:53,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741845_1021 (size=27) 2024-11-25T10:32:53,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741845_1021 (size=27) 2024-11-25T10:32:53,618 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): 
pid=7 splitting complete for store file: hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba for region: db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:32:53,620 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region db362bd9276f0f7b0aea2f60885c6e5f Daughter A: [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f] storefiles, Daughter B: [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f] storefiles. 2024-11-25T10:32:53,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741846_1022 (size=71) 2024-11-25T10:32:53,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741846_1022 (size=71) 2024-11-25T10:32:53,631 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:53,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741847_1023 (size=71) 2024-11-25T10:32:53,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741847_1023 (size=71) 2024-11-25T10:32:53,643 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:53,650 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-25T10:32:53,652 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-25T10:32:53,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:53,654 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732530773654"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732530773654"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732530773654"}]},"ts":"1732530773654"} 2024-11-25T10:32:53,654 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732530773654"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732530773654"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732530773654"}]},"ts":"1732530773654"} 2024-11-25T10:32:53,654 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732530773654"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732530773654"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732530773654"}]},"ts":"1732530773654"} 2024-11-25T10:32:53,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=40d83bebc06ac5292c7cdc20116583fd, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=156ba42f3cf44382c1e5d9b730cfb315, ASSIGN}] 2024-11-25T10:32:53,672 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=40d83bebc06ac5292c7cdc20116583fd, ASSIGN 2024-11-25T10:32:53,672 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=156ba42f3cf44382c1e5d9b730cfb315, ASSIGN 2024-11-25T10:32:53,673 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=40d83bebc06ac5292c7cdc20116583fd, ASSIGN; state=SPLITTING_NEW, location=e2fd862922a0,40685,1732530750258; forceNewPlan=false, retain=false 2024-11-25T10:32:53,673 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=156ba42f3cf44382c1e5d9b730cfb315, ASSIGN; state=SPLITTING_NEW, location=e2fd862922a0,40685,1732530750258; forceNewPlan=false, retain=false 2024-11-25T10:32:53,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:53,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:53,823 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=40d83bebc06ac5292c7cdc20116583fd, regionState=OPENING, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:53,823 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=156ba42f3cf44382c1e5d9b730cfb315, regionState=OPENING, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:53,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=156ba42f3cf44382c1e5d9b730cfb315, ASSIGN because future has completed 2024-11-25T10:32:53,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 156ba42f3cf44382c1e5d9b730cfb315, server=e2fd862922a0,40685,1732530750258}] 2024-11-25T10:32:53,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=40d83bebc06ac5292c7cdc20116583fd, ASSIGN because future has completed 2024-11-25T10:32:53,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 40d83bebc06ac5292c7cdc20116583fd, server=e2fd862922a0,40685,1732530750258}] 2024-11-25T10:32:53,982 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 
2024-11-25T10:32:53,982 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 156ba42f3cf44382c1e5d9b730cfb315, NAME => 'TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-25T10:32:53,982 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,982 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:53,982 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,982 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,984 INFO [StoreOpener-156ba42f3cf44382c1e5d9b730cfb315-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,984 INFO [StoreOpener-156ba42f3cf44382c1e5d9b730cfb315-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 156ba42f3cf44382c1e5d9b730cfb315 columnFamilyName info 2024-11-25T10:32:53,984 DEBUG [StoreOpener-156ba42f3cf44382c1e5d9b730cfb315-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:53,993 DEBUG [StoreOpener-156ba42f3cf44382c1e5d9b730cfb315-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f->hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba-top 2024-11-25T10:32:53,993 INFO [StoreOpener-156ba42f3cf44382c1e5d9b730cfb315-1 {}] regionserver.HStore(327): Store=156ba42f3cf44382c1e5d9b730cfb315/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:32:53,994 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,994 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,995 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,995 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,995 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,997 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,998 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 156ba42f3cf44382c1e5d9b730cfb315; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870378, jitterRate=0.10674385726451874}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T10:32:53,998 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:32:53,998 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 156ba42f3cf44382c1e5d9b730cfb315: Running coprocessor pre-open hook at 1732530773982Writing region info on filesystem at 1732530773982Initializing all the Stores at 1732530773983 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530773983Cleaning up temporary data from old regions at 1732530773995 (+12 ms)Running coprocessor post-open hooks at 1732530773998 (+3 ms)Region opened successfully at 1732530773998 2024-11-25T10:32:53,999 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., pid=12, masterSystemTime=1732530773978 2024-11-25T10:32:53,999 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add 
compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:32:53,999 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:53,999 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-25T10:32:54,000 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:32:54,000 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files) 2024-11-25T10:32:54,000 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:32:54,000 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f->hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba-top] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=72.6 K 2024-11-25T10:32:54,001 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732530761129 2024-11-25T10:32:54,001 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:32:54,001 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:32:54,001 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 
2024-11-25T10:32:54,002 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 40d83bebc06ac5292c7cdc20116583fd, NAME => 'TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-25T10:32:54,002 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,002 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:32:54,002 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,002 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,002 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=156ba42f3cf44382c1e5d9b730cfb315, regionState=OPEN, openSeqNum=86, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:54,003 INFO [StoreOpener-40d83bebc06ac5292c7cdc20116583fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,004 INFO [StoreOpener-40d83bebc06ac5292c7cdc20116583fd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 40d83bebc06ac5292c7cdc20116583fd columnFamilyName info 2024-11-25T10:32:54,004 DEBUG [StoreOpener-40d83bebc06ac5292c7cdc20116583fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:32:54,004 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-25T10:32:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-25T10:32:54,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-25T10:32:54,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 156ba42f3cf44382c1e5d9b730cfb315, server=e2fd862922a0,40685,1732530750258 because future has completed 2024-11-25T10:32:54,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-25T10:32:54,009 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 156ba42f3cf44382c1e5d9b730cfb315, server=e2fd862922a0,40685,1732530750258 in 180 msec 2024-11-25T10:32:54,011 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=156ba42f3cf44382c1e5d9b730cfb315, ASSIGN in 338 msec 2024-11-25T10:32:54,019 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#61 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:32:54,020 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4761feddc7f244f291fd851d204abef0 is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:32:54,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/info/e6a74446131d4b16802e6f89b165792d is 193, key is TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315./info:regioninfo/1732530774002/Put/seqid=0 2024-11-25T10:32:54,021 DEBUG [StoreOpener-40d83bebc06ac5292c7cdc20116583fd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f->hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba-bottom 2024-11-25T10:32:54,021 INFO [StoreOpener-40d83bebc06ac5292c7cdc20116583fd-1 {}] regionserver.HStore(327): Store=40d83bebc06ac5292c7cdc20116583fd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:32:54,021 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,022 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,024 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,024 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,024 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741848_1024 (size=8260) 2024-11-25T10:32:54,026 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741848_1024 (size=8260) 2024-11-25T10:32:54,027 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 40d83bebc06ac5292c7cdc20116583fd; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=715369, jitterRate=-0.09036248922348022}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-25T10:32:54,027 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:32:54,027 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 40d83bebc06ac5292c7cdc20116583fd: Running coprocessor pre-open hook at 1732530774002Writing region info on filesystem at 1732530774002Initializing all the Stores at 1732530774003 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530774003Cleaning up temporary data from old regions at 1732530774024 (+21 ms)Running coprocessor post-open hooks at 1732530774027 (+3 ms)Region opened successfully at 1732530774027 2024-11-25T10:32:54,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741849_1025 (size=9882) 2024-11-25T10:32:54,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741849_1025 (size=9882) 2024-11-25T10:32:54,029 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for 
TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd., pid=13, masterSystemTime=1732530773978 2024-11-25T10:32:54,029 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 40d83bebc06ac5292c7cdc20116583fd:info, priority=-2147483648, current under compaction store size is 2 2024-11-25T10:32:54,029 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:54,029 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-25T10:32:54,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/info/e6a74446131d4b16802e6f89b165792d 2024-11-25T10:32:54,030 INFO [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 2024-11-25T10:32:54,030 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.HStore(1541): 40d83bebc06ac5292c7cdc20116583fd/info is initiating minor compaction (all files) 2024-11-25T10:32:54,030 INFO [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 40d83bebc06ac5292c7cdc20116583fd/info in TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 2024-11-25T10:32:54,030 INFO [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f->hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba-bottom] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/.tmp, totalSize=72.6 K 2024-11-25T10:32:54,031 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] compactions.Compactor(225): Compacting d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732530761129 2024-11-25T10:32:54,032 DEBUG [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 2024-11-25T10:32:54,032 INFO [RS_OPEN_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 
2024-11-25T10:32:54,032 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=40d83bebc06ac5292c7cdc20116583fd, regionState=OPEN, openSeqNum=86, regionLocation=e2fd862922a0,40685,1732530750258 2024-11-25T10:32:54,033 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4761feddc7f244f291fd851d204abef0 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4761feddc7f244f291fd851d204abef0 2024-11-25T10:32:54,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 40d83bebc06ac5292c7cdc20116583fd, server=e2fd862922a0,40685,1732530750258 because future has completed 2024-11-25T10:32:54,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-25T10:32:54,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 40d83bebc06ac5292c7cdc20116583fd, server=e2fd862922a0,40685,1732530750258 in 209 msec 2024-11-25T10:32:54,040 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into 4761feddc7f244f291fd851d204abef0(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T10:32:54,040 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:32:54,040 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=15, startTime=1732530773999; duration=0sec 2024-11-25T10:32:54,040 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:54,040 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:32:54,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-25T10:32:54,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=40d83bebc06ac5292c7cdc20116583fd, ASSIGN in 368 msec 2024-11-25T10:32:54,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=db362bd9276f0f7b0aea2f60885c6e5f, daughterA=40d83bebc06ac5292c7cdc20116583fd, daughterB=156ba42f3cf44382c1e5d9b730cfb315 in 655 msec 2024-11-25T10:32:54,052 INFO [RS:0;e2fd862922a0:40685-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 40d83bebc06ac5292c7cdc20116583fd#info#compaction#63 average throughput is 62.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:32:54,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/ns/eb36b4e4f4dc44e08379076bddb99365 is 43, key is default/ns:d/1732530751054/Put/seqid=0 2024-11-25T10:32:54,052 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/.tmp/info/226f9788cc0f4b37808dd79fee0c0c13 is 1080, key is row0001/info:/1732530761129/Put/seqid=0 2024-11-25T10:32:54,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741850_1026 (size=70862) 2024-11-25T10:32:54,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741850_1026 (size=70862) 2024-11-25T10:32:54,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741851_1027 (size=5153) 2024-11-25T10:32:54,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741851_1027 (size=5153) 2024-11-25T10:32:54,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/ns/eb36b4e4f4dc44e08379076bddb99365 2024-11-25T10:32:54,064 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/.tmp/info/226f9788cc0f4b37808dd79fee0c0c13 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/info/226f9788cc0f4b37808dd79fee0c0c13 2024-11-25T10:32:54,069 INFO [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 40d83bebc06ac5292c7cdc20116583fd/info of 40d83bebc06ac5292c7cdc20116583fd into 226f9788cc0f4b37808dd79fee0c0c13(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
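The flush and compaction records above follow a write-then-commit pattern: the new files (e6a74446..., 226f9788...) are first written under the region's .tmp directory and only afterwards committed into the store directory by a rename. The sketch below illustrates that idea generically on a local filesystem; it is a hedged example only, not the HRegionFileSystem implementation, and the directory layout and names are invented for the demo.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommit {
    // Illustration of the tmp-then-rename idea seen in the log, not HBase code.
    // Write content to <store>/.tmp/<name>, then move it into <store>/<name>.
    static Path writeAndCommit(Path storeDir, String name, byte[] content) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(name);
        Files.write(tmpFile, content);           // readers never observe a half-written file
        Path finalFile = storeDir.resolve(name);
        // Commit step: within the same filesystem this rename is atomic.
        return Files.move(tmpFile, finalFile, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("store-info"); // stand-in for a column-family dir
        Path committed = writeAndCommit(store, "example-hfile", "demo".getBytes());
        System.out.println("committed " + committed);
    }
}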
2024-11-25T10:32:54,070 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 40d83bebc06ac5292c7cdc20116583fd: 2024-11-25T10:32:54,070 INFO [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd., storeName=40d83bebc06ac5292c7cdc20116583fd/info, priority=15, startTime=1732530774029; duration=0sec 2024-11-25T10:32:54,070 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:32:54,070 DEBUG [RS:0;e2fd862922a0:40685-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 40d83bebc06ac5292c7cdc20116583fd:info 2024-11-25T10:32:54,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/table/5276126d81ba4f708c57767d4972af8c is 65, key is TestLogRolling-testLogRolling/table:state/1732530751444/Put/seqid=0 2024-11-25T10:32:54,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741852_1028 (size=5340) 2024-11-25T10:32:54,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741852_1028 (size=5340) 2024-11-25T10:32:54,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/table/5276126d81ba4f708c57767d4972af8c 2024-11-25T10:32:54,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/info/e6a74446131d4b16802e6f89b165792d as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/info/e6a74446131d4b16802e6f89b165792d 2024-11-25T10:32:54,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/info/e6a74446131d4b16802e6f89b165792d, entries=30, sequenceid=17, filesize=9.7 K 2024-11-25T10:32:54,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/ns/eb36b4e4f4dc44e08379076bddb99365 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/ns/eb36b4e4f4dc44e08379076bddb99365 2024-11-25T10:32:54,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/ns/eb36b4e4f4dc44e08379076bddb99365, entries=2, sequenceid=17, filesize=5.0 K 2024-11-25T10:32:54,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/table/5276126d81ba4f708c57767d4972af8c as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/table/5276126d81ba4f708c57767d4972af8c 2024-11-25T10:32:54,102 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/table/5276126d81ba4f708c57767d4972af8c, entries=2, sequenceid=17, filesize=5.2 K 2024-11-25T10:32:54,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 99ms, sequenceid=17, compaction requested=false 2024-11-25T10:32:54,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T10:32:54,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:54,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:54,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:55,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57070 deadline: 1732530785334, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. is not online on e2fd862922a0,40685,1732530750258 2024-11-25T10:32:55,335 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. is not online on e2fd862922a0,40685,1732530750258 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T10:32:55,336 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f. 
is not online on e2fd862922a0,40685,1732530750258 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-25T10:32:55,336 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732530751085.db362bd9276f0f7b0aea2f60885c6e5f., hostname=e2fd862922a0,40685,1732530750258, seqNum=2 from cache 2024-11-25T10:32:55,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:55,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:55,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:56,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:56,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:56,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:57,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:57,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:57,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:58,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,576 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,577 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,594 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,599 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:58,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:58,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:58,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:59,105 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-25T10:32:59,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,106 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,124 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,125 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-25T10:32:59,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:32:59,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:32:59,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:00,192 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-25T10:33:00,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:00,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:00,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:01,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:01,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:01,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:02,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:02,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:02,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:03,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:03,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:03,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:04,658 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:04,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:04,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-25T10:33:05,357 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., hostname=e2fd862922a0,40685,1732530750258, seqNum=86]
2024-11-25T10:33:05,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315
2024-11-25T10:33:05,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-25T10:33:05,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/f8ddf316d6eb43318afbbeb41ace4446 is 1080, key is row0065/info:/1732530785358/Put/seqid=0
2024-11-25T10:33:05,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741853_1029 (size=12509)
2024-11-25T10:33:05,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741853_1029 (size=12509)
2024-11-25T10:33:05,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/f8ddf316d6eb43318afbbeb41ace4446
2024-11-25T10:33:05,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/f8ddf316d6eb43318afbbeb41ace4446 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/f8ddf316d6eb43318afbbeb41ace4446
2024-11-25T10:33:05,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/f8ddf316d6eb43318afbbeb41ace4446, entries=7, sequenceid=96, filesize=12.2 K
2024-11-25T10:33:05,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 156ba42f3cf44382c1e5d9b730cfb315 in 23ms, sequenceid=96, compaction requested=false
2024-11-25T10:33:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315:
2024-11-25T10:33:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315
2024-11-25T10:33:05,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB
2024-11-25T10:33:05,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/184d0d0bec1b49cda19f4df3df54ef23 is 1080, key is row0072/info:/1732530785369/Put/seqid=0
2024-11-25T10:33:05,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741854_1030 (size=21141)
2024-11-25T10:33:05,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741854_1030 (size=21141)
2024-11-25T10:33:05,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/184d0d0bec1b49cda19f4df3df54ef23
2024-11-25T10:33:05,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/184d0d0bec1b49cda19f4df3df54ef23 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/184d0d0bec1b49cda19f4df3df54ef23
2024-11-25T10:33:05,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/184d0d0bec1b49cda19f4df3df54ef23, entries=15, sequenceid=114, filesize=20.6 K
2024-11-25T10:33:05,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=10.51 KB/10760 for 156ba42f3cf44382c1e5d9b730cfb315 in 20ms, sequenceid=114, compaction requested=true
2024-11-25T10:33:05,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315:
2024-11-25T10:33:05,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1
2024-11-25T10:33:05,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T10:33:05,412 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T10:33:05,413 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41910 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T10:33:05,413 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files)
2024-11-25T10:33:05,413 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.
2024-11-25T10:33:05,413 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4761feddc7f244f291fd851d204abef0, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/f8ddf316d6eb43318afbbeb41ace4446, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/184d0d0bec1b49cda19f4df3df54ef23] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=40.9 K
2024-11-25T10:33:05,414 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4761feddc7f244f291fd851d204abef0, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1732530773328
2024-11-25T10:33:05,414 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting f8ddf316d6eb43318afbbeb41ace4446, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732530785358
2024-11-25T10:33:05,414 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 184d0d0bec1b49cda19f4df3df54ef23, keycount=15, bloomtype=ROW, size=20.6 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732530785369
2024-11-25T10:33:05,425 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#68 average throughput is 25.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-25T10:33:05,426 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/5690590538a246749d5adcdaa09dc3be is 1080, key is row0062/info:/1732530773328/Put/seqid=0
2024-11-25T10:33:05,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741855_1031 (size=32086)
2024-11-25T10:33:05,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741855_1031 (size=32086)
2024-11-25T10:33:05,436 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/5690590538a246749d5adcdaa09dc3be as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/5690590538a246749d5adcdaa09dc3be
2024-11-25T10:33:05,442 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into 5690590538a246749d5adcdaa09dc3be(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-25T10:33:05,442 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315:
2024-11-25T10:33:05,442 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530785412; duration=0sec
2024-11-25T10:33:05,442 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T10:33:05,442 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info
2024-11-25T10:33:05,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:05,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:05,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:06,659 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:06,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:06,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:07,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:07,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-25T10:33:07,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4589cafdfc6b45fc824a2533b9d461e5 is 1080, key is row0087/info:/1732530785393/Put/seqid=0 2024-11-25T10:33:07,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741856_1032 (size=16818) 2024-11-25T10:33:07,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741856_1032 (size=16818) 2024-11-25T10:33:07,421 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4589cafdfc6b45fc824a2533b9d461e5 2024-11-25T10:33:07,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4589cafdfc6b45fc824a2533b9d461e5 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4589cafdfc6b45fc824a2533b9d461e5 2024-11-25T10:33:07,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4589cafdfc6b45fc824a2533b9d461e5, entries=11, sequenceid=129, filesize=16.4 K 2024-11-25T10:33:07,432 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 156ba42f3cf44382c1e5d9b730cfb315 in 21ms, sequenceid=129, compaction requested=false 2024-11-25T10:33:07,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:07,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:07,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T10:33:07,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/40377952031f46d0bf85607746ee4d7c is 1080, key is row0098/info:/1732530787412/Put/seqid=0 2024-11-25T10:33:07,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is 
added to blk_1073741857_1033 (size=20078) 2024-11-25T10:33:07,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741857_1033 (size=20078) 2024-11-25T10:33:07,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/40377952031f46d0bf85607746ee4d7c 2024-11-25T10:33:07,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/40377952031f46d0bf85607746ee4d7c as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/40377952031f46d0bf85607746ee4d7c 2024-11-25T10:33:07,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/40377952031f46d0bf85607746ee4d7c, entries=14, sequenceid=146, filesize=19.6 K 2024-11-25T10:33:07,455 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=12.61 KB/12912 for 156ba42f3cf44382c1e5d9b730cfb315 in 22ms, sequenceid=146, compaction requested=true 2024-11-25T10:33:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:33:07,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:07,455 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:33:07,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:07,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-25T10:33:07,457 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 68982 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:33:07,457 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files) 2024-11-25T10:33:07,457 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 
2024-11-25T10:33:07,457 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/5690590538a246749d5adcdaa09dc3be, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4589cafdfc6b45fc824a2533b9d461e5, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/40377952031f46d0bf85607746ee4d7c] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=67.4 K 2024-11-25T10:33:07,458 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5690590538a246749d5adcdaa09dc3be, keycount=25, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732530773328 2024-11-25T10:33:07,458 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4589cafdfc6b45fc824a2533b9d461e5, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732530785393 2024-11-25T10:33:07,458 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 40377952031f46d0bf85607746ee4d7c, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732530787412 2024-11-25T10:33:07,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/0638809b77904d67925fef0a094c9e8f is 1080, key is row0112/info:/1732530787434/Put/seqid=0 2024-11-25T10:33:07,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741858_1034 (size=19000) 2024-11-25T10:33:07,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741858_1034 (size=19000) 2024-11-25T10:33:07,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/0638809b77904d67925fef0a094c9e8f 2024-11-25T10:33:07,470 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#72 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:33:07,471 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/b26222678c5f42b0ae0a89ba853ba978 is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:33:07,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/0638809b77904d67925fef0a094c9e8f as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/0638809b77904d67925fef0a094c9e8f 2024-11-25T10:33:07,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741859_1035 (size=59168) 2024-11-25T10:33:07,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741859_1035 (size=59168) 2024-11-25T10:33:07,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/0638809b77904d67925fef0a094c9e8f, entries=13, sequenceid=162, filesize=18.6 K 2024-11-25T10:33:07,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 156ba42f3cf44382c1e5d9b730cfb315 in 22ms, sequenceid=162, compaction requested=false 2024-11-25T10:33:07,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:07,481 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/b26222678c5f42b0ae0a89ba853ba978 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/b26222678c5f42b0ae0a89ba853ba978 2024-11-25T10:33:07,486 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into b26222678c5f42b0ae0a89ba853ba978(size=57.8 K), total size for store is 76.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-25T10:33:07,486 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:07,486 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530787455; duration=0sec 2024-11-25T10:33:07,486 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:07,486 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:33:07,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:07,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:07,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:08,660 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:08,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:08,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:09,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:09,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:33:09,474 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/c2d633e1d0474929b19f0524d867759e is 1080, key is row0125/info:/1732530787458/Put/seqid=0 2024-11-25T10:33:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741860_1036 (size=12516) 2024-11-25T10:33:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741860_1036 (size=12516) 2024-11-25T10:33:09,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/c2d633e1d0474929b19f0524d867759e 2024-11-25T10:33:09,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/c2d633e1d0474929b19f0524d867759e as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/c2d633e1d0474929b19f0524d867759e 2024-11-25T10:33:09,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/c2d633e1d0474929b19f0524d867759e, entries=7, sequenceid=173, filesize=12.2 K 2024-11-25T10:33:09,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 
KB/13988 for 156ba42f3cf44382c1e5d9b730cfb315 in 20ms, sequenceid=173, compaction requested=true 2024-11-25T10:33:09,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:09,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:33:09,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:09,491 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:33:09,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:09,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T10:33:09,492 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 90684 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:33:09,492 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files) 2024-11-25T10:33:09,492 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 
2024-11-25T10:33:09,492 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/b26222678c5f42b0ae0a89ba853ba978, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/0638809b77904d67925fef0a094c9e8f, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/c2d633e1d0474929b19f0524d867759e] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=88.6 K 2024-11-25T10:33:09,493 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting b26222678c5f42b0ae0a89ba853ba978, keycount=50, bloomtype=ROW, size=57.8 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732530773328 2024-11-25T10:33:09,493 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0638809b77904d67925fef0a094c9e8f, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732530787434 2024-11-25T10:33:09,493 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting c2d633e1d0474929b19f0524d867759e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732530787458 2024-11-25T10:33:09,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/2cb7d1010065470a95324a7eb3fec0a1 is 1080, key is row0132/info:/1732530789471/Put/seqid=0 2024-11-25T10:33:09,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741861_1037 (size=20078) 2024-11-25T10:33:09,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741861_1037 (size=20078) 2024-11-25T10:33:09,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/2cb7d1010065470a95324a7eb3fec0a1 2024-11-25T10:33:09,505 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#75 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:33:09,505 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/19e6208bdd4b41b08246ed2dca6ce4c2 is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:33:09,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/2cb7d1010065470a95324a7eb3fec0a1 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/2cb7d1010065470a95324a7eb3fec0a1 2024-11-25T10:33:09,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741862_1038 (size=80967) 2024-11-25T10:33:09,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/2cb7d1010065470a95324a7eb3fec0a1, entries=14, sequenceid=190, filesize=19.6 K 2024-11-25T10:33:09,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741862_1038 (size=80967) 2024-11-25T10:33:09,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 156ba42f3cf44382c1e5d9b730cfb315 in 25ms, sequenceid=190, compaction requested=false 2024-11-25T10:33:09,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:09,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:09,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T10:33:09,522 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/19e6208bdd4b41b08246ed2dca6ce4c2 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/19e6208bdd4b41b08246ed2dca6ce4c2 2024-11-25T10:33:09,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/d05f963749444e87b4341cd7c8360e79 is 1080, key is row0146/info:/1732530789492/Put/seqid=0 2024-11-25T10:33:09,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741863_1039 (size=17906) 2024-11-25T10:33:09,528 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741863_1039 (size=17906) 2024-11-25T10:33:09,529 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into 19e6208bdd4b41b08246ed2dca6ce4c2(size=79.1 K), total size for store is 98.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:33:09,529 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/d05f963749444e87b4341cd7c8360e79 2024-11-25T10:33:09,529 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:09,529 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530789490; duration=0sec 2024-11-25T10:33:09,529 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:09,529 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:33:09,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/d05f963749444e87b4341cd7c8360e79 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d05f963749444e87b4341cd7c8360e79 2024-11-25T10:33:09,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d05f963749444e87b4341cd7c8360e79, entries=12, sequenceid=205, filesize=17.5 K 2024-11-25T10:33:09,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for 156ba42f3cf44382c1e5d9b730cfb315 in 23ms, sequenceid=205, compaction requested=true 2024-11-25T10:33:09,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:09,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:33:09,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:09,540 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:33:09,541 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118951 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:33:09,541 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files) 2024-11-25T10:33:09,541 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:33:09,541 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/19e6208bdd4b41b08246ed2dca6ce4c2, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/2cb7d1010065470a95324a7eb3fec0a1, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d05f963749444e87b4341cd7c8360e79] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=116.2 K 2024-11-25T10:33:09,542 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 19e6208bdd4b41b08246ed2dca6ce4c2, keycount=70, bloomtype=ROW, size=79.1 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732530773328 2024-11-25T10:33:09,542 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2cb7d1010065470a95324a7eb3fec0a1, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1732530789471 2024-11-25T10:33:09,542 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting d05f963749444e87b4341cd7c8360e79, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732530789492 2024-11-25T10:33:09,554 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#77 average throughput is 49.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:33:09,555 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/9785a387a72c4e38a234680c9e2e9471 is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:33:09,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741864_1040 (size=109105) 2024-11-25T10:33:09,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741864_1040 (size=109105) 2024-11-25T10:33:09,565 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/9785a387a72c4e38a234680c9e2e9471 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9785a387a72c4e38a234680c9e2e9471 2024-11-25T10:33:09,571 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into 9785a387a72c4e38a234680c9e2e9471(size=106.5 K), total size for store is 106.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:33:09,571 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:09,571 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530789540; duration=0sec 2024-11-25T10:33:09,571 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:09,571 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:33:09,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:09,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:09,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:10,661 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:10,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:10,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:11,069 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-25T10:33:11,069 INFO [master/e2fd862922a0:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
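The Close-WAL-Writer WARN entries above recur roughly once per second because the close thread keeps retrying lease recovery against a DFSClient that has already been shut down; RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed reflectively, so the real "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException. A minimal, self-contained sketch of that wrapping behaviour (editor's illustration only; ReflectiveWrapDemo and its isFileClosed stand-in are hypothetical, not the RecoverLeaseFSUtils code):

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveWrapDemo {
    // Hypothetical stand-in for DistributedFileSystem#isFileClosed after the client is closed.
    public static boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        Method m = ReflectiveWrapDemo.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(null, "/WALs/example.wal");
        } catch (InvocationTargetException e) {
            // The reflective layer reports the wrapper; the meaningful error is the cause,
            // which is exactly the "Caused by: java.io.IOException: Filesystem closed" seen above.
            System.out.println("wrapper: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}
```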
2024-11-25T10:33:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:11,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:33:11,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/a01f210bd2164898b9e963adc2bb75dd is 1080, key is row0158/info:/1732530789519/Put/seqid=0 2024-11-25T10:33:11,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741865_1041 (size=12516) 2024-11-25T10:33:11,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741865_1041 (size=12516) 2024-11-25T10:33:11,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/a01f210bd2164898b9e963adc2bb75dd 2024-11-25T10:33:11,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/a01f210bd2164898b9e963adc2bb75dd as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/a01f210bd2164898b9e963adc2bb75dd 2024-11-25T10:33:11,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/a01f210bd2164898b9e963adc2bb75dd, entries=7, sequenceid=217, filesize=12.2 K 2024-11-25T10:33:11,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for 156ba42f3cf44382c1e5d9b730cfb315 in 22ms, sequenceid=217, compaction requested=false 2024-11-25T10:33:11,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:11,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T10:33:11,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/7e66637024f540a299e96cb49a8a17dc is 1080, key is row0165/info:/1732530791531/Put/seqid=0 2024-11-25T10:33:11,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to 
blk_1073741866_1042 (size=20078) 2024-11-25T10:33:11,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741866_1042 (size=20078) 2024-11-25T10:33:11,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/7e66637024f540a299e96cb49a8a17dc 2024-11-25T10:33:11,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/7e66637024f540a299e96cb49a8a17dc as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/7e66637024f540a299e96cb49a8a17dc 2024-11-25T10:33:11,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/7e66637024f540a299e96cb49a8a17dc, entries=14, sequenceid=234, filesize=19.6 K 2024-11-25T10:33:11,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 156ba42f3cf44382c1e5d9b730cfb315 in 19ms, sequenceid=234, compaction requested=true 2024-11-25T10:33:11,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:11,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:33:11,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:11,572 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:33:11,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:11,573 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 141699 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:33:11,573 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-25T10:33:11,573 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files) 2024-11-25T10:33:11,573 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 
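The flush entries above follow a two-step pattern: the memstore is first written out under the region's .tmp/ directory ("Flushed memstore ... to=.../.tmp/info/..."), and only afterwards is the file "Committing ... as ..." moved into the live info/ store directory, so readers never observe a partially written HFile. A minimal sketch of that general write-then-rename idea using local NIO (illustrative only, not HRegionFileSystem; the file name is just borrowed from the log for flavour):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommitDemo {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("info-store");
        Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

        // 1. Write the complete file under .tmp/ (stand-in for the flushed HFile).
        Path tmpFile = tmpDir.resolve("a01f210bd2164898b9e963adc2bb75dd");
        Files.write(tmpFile, "flushed cells".getBytes());

        // 2. Commit it by moving it into the live store directory in a single step.
        Path committed = Files.move(tmpFile, storeDir.resolve(tmpFile.getFileName()),
            StandardCopyOption.ATOMIC_MOVE);
        System.out.println("committed " + committed);
    }
}
```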
2024-11-25T10:33:11,573 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9785a387a72c4e38a234680c9e2e9471, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/a01f210bd2164898b9e963adc2bb75dd, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/7e66637024f540a299e96cb49a8a17dc] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=138.4 K 2024-11-25T10:33:11,574 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9785a387a72c4e38a234680c9e2e9471, keycount=96, bloomtype=ROW, size=106.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732530773328 2024-11-25T10:33:11,574 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting a01f210bd2164898b9e963adc2bb75dd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1732530789519 2024-11-25T10:33:11,574 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e66637024f540a299e96cb49a8a17dc, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732530791531 2024-11-25T10:33:11,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/8743966c06ad49cdb769ba0dd485f097 is 1080, key is row0179/info:/1732530791554/Put/seqid=0 2024-11-25T10:33:11,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741867_1043 (size=20078) 2024-11-25T10:33:11,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741867_1043 (size=20078) 2024-11-25T10:33:11,585 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#81 average throughput is 60.03 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:33:11,585 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/84faca81139045549446e01c4b33e0f4 is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:33:11,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741868_1044 (size=131977) 2024-11-25T10:33:11,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741868_1044 (size=131977) 2024-11-25T10:33:11,594 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/84faca81139045549446e01c4b33e0f4 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/84faca81139045549446e01c4b33e0f4 2024-11-25T10:33:11,599 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into 84faca81139045549446e01c4b33e0f4(size=128.9 K), total size for store is 128.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:33:11,599 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:11,599 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530791572; duration=0sec 2024-11-25T10:33:11,599 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:11,599 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:33:11,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:11,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:11,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:11,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/8743966c06ad49cdb769ba0dd485f097 2024-11-25T10:33:11,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/8743966c06ad49cdb769ba0dd485f097 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8743966c06ad49cdb769ba0dd485f097 2024-11-25T10:33:11,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8743966c06ad49cdb769ba0dd485f097, entries=14, sequenceid=251, filesize=19.6 K 2024-11-25T10:33:11,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 156ba42f3cf44382c1e5d9b730cfb315 in 419ms, sequenceid=251, compaction requested=false 2024-11-25T10:33:11,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:12,662 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:12,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:12,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:13,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:13,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-25T10:33:13,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/26fb4200d27a4b2fa8566ec3aea8295b is 1080, key is row0193/info:/1732530793574/Put/seqid=0 2024-11-25T10:33:13,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741869_1045 (size=12522) 2024-11-25T10:33:13,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741869_1045 (size=12522) 2024-11-25T10:33:13,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/26fb4200d27a4b2fa8566ec3aea8295b 2024-11-25T10:33:13,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/26fb4200d27a4b2fa8566ec3aea8295b as 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/26fb4200d27a4b2fa8566ec3aea8295b 2024-11-25T10:33:13,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/26fb4200d27a4b2fa8566ec3aea8295b, entries=7, sequenceid=262, filesize=12.2 K 2024-11-25T10:33:13,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 156ba42f3cf44382c1e5d9b730cfb315 in 24ms, sequenceid=262, compaction requested=true 2024-11-25T10:33:13,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:13,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:33:13,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:13,608 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:33:13,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:13,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-25T10:33:13,609 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 164577 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:33:13,609 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files) 2024-11-25T10:33:13,609 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 
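The "Exploring compaction algorithm has selected N files ... with 1 in ratio" lines above report a size-ratio test on each candidate selection: the idea is that no single file should dwarf the rest of the files it would be compacted with. A simplified sketch of that test (editor's illustration with hypothetical sizes and an assumed ratio of 1.2; the real ExploringCompactionPolicy also weighs permutations, min/max file counts and blocking limits):

```java
import java.util.List;

public class FilesInRatioDemo {
    // True when no single file is larger than `ratio` times the sum of the other files.
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;   // one file dominates the selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        double ratio = 1.2;                                        // assumed ratio
        List<Long> balanced = List.of(40_000L, 35_000L, 30_000L);  // hypothetical sizes
        List<Long> skewed = List.of(140_000L, 12_000L, 20_000L);   // one large file
        System.out.println("balanced selection in ratio: " + filesInRatio(balanced, ratio));
        System.out.println("skewed selection in ratio:   " + filesInRatio(skewed, ratio));
    }
}
```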
2024-11-25T10:33:13,610 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/84faca81139045549446e01c4b33e0f4, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8743966c06ad49cdb769ba0dd485f097, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/26fb4200d27a4b2fa8566ec3aea8295b] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=160.7 K 2024-11-25T10:33:13,610 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 84faca81139045549446e01c4b33e0f4, keycount=117, bloomtype=ROW, size=128.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732530773328 2024-11-25T10:33:13,610 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8743966c06ad49cdb769ba0dd485f097, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732530791554 2024-11-25T10:33:13,611 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26fb4200d27a4b2fa8566ec3aea8295b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732530793574 2024-11-25T10:33:13,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4f28a79482c642bb882fa39b0a28f1ef is 1080, key is row0200/info:/1732530793585/Put/seqid=0 2024-11-25T10:33:13,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741870_1046 (size=23333) 2024-11-25T10:33:13,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741870_1046 (size=23333) 2024-11-25T10:33:13,623 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#84 average throughput is 70.80 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:33:13,623 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/d345fecec4a24eaca5170c40d956726c is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:33:13,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4f28a79482c642bb882fa39b0a28f1ef 2024-11-25T10:33:13,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741871_1047 (size=154800) 2024-11-25T10:33:13,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741871_1047 (size=154800) 2024-11-25T10:33:13,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/4f28a79482c642bb882fa39b0a28f1ef as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4f28a79482c642bb882fa39b0a28f1ef 2024-11-25T10:33:13,633 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/d345fecec4a24eaca5170c40d956726c as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d345fecec4a24eaca5170c40d956726c 2024-11-25T10:33:13,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4f28a79482c642bb882fa39b0a28f1ef, entries=17, sequenceid=282, filesize=22.8 K 2024-11-25T10:33:13,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=8.41 KB/8608 for 156ba42f3cf44382c1e5d9b730cfb315 in 26ms, sequenceid=282, compaction requested=false 2024-11-25T10:33:13,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:13,639 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into d345fecec4a24eaca5170c40d956726c(size=151.2 K), total size for store is 174.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
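The PressureAwareThroughputController lines above compare the compaction's average write rate against a 50.00 MB/second limit and report how often the writer had to sleep ("slept 0 time(s)" means the limit was never reached). A much-simplified throttle sketch showing that bookkeeping (editor's illustration only; SimpleThroughputThrottle and its numbers are hypothetical, not the HBase controller):

```java
public class SimpleThroughputThrottle {
    private final double limitBytesPerSec;
    private final long start = System.nanoTime();
    private long written;   // bytes written so far
    private long sleeps;    // how many times we had to pause

    public SimpleThroughputThrottle(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Call after each chunk; sleeps just long enough to stay under the limit.
    public void control(long bytesJustWritten) throws InterruptedException {
        written += bytesJustWritten;
        double elapsedSec = (System.nanoTime() - start) / 1e9;
        double minSecondsForBytes = written / limitBytesPerSec;
        if (minSecondsForBytes > elapsedSec) {
            sleeps++;
            Thread.sleep((long) ((minSecondsForBytes - elapsedSec) * 1000));
        }
    }

    public String summary() {
        double elapsedSec = Math.max((System.nanoTime() - start) / 1e9, 1e-9);
        return String.format("average throughput is %.2f MB/second, slept %d time(s)",
            written / elapsedSec / 1024 / 1024, sleeps);
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleThroughputThrottle t = new SimpleThroughputThrottle(50.0 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            t.control(4L * 1024 * 1024);   // pretend we wrote 4 MB per chunk
        }
        System.out.println(t.summary());
    }
}
```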
2024-11-25T10:33:13,639 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:13,639 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530793608; duration=0sec 2024-11-25T10:33:13,639 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:13,639 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:33:13,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:13,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:13,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:14,663 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:14,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:14,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:15,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:15,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-25T10:33:15,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/399489dc5ff24c73b3d8526c51ce7ec2 is 1080, key is row0217/info:/1732530793610/Put/seqid=0 2024-11-25T10:33:15,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741872_1048 (size=14681) 2024-11-25T10:33:15,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741872_1048 (size=14681) 2024-11-25T10:33:15,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/399489dc5ff24c73b3d8526c51ce7ec2 2024-11-25T10:33:15,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/399489dc5ff24c73b3d8526c51ce7ec2 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/399489dc5ff24c73b3d8526c51ce7ec2 2024-11-25T10:33:15,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/399489dc5ff24c73b3d8526c51ce7ec2, entries=9, sequenceid=295, filesize=14.3 K 2024-11-25T10:33:15,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, 
currentSize=13.66 KB/13988 for 156ba42f3cf44382c1e5d9b730cfb315 in 23ms, sequenceid=295, compaction requested=true
2024-11-25T10:33:15,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315:
2024-11-25T10:33:15,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1
2024-11-25T10:33:15,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-25T10:33:15,647 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-25T10:33:15,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315
2024-11-25T10:33:15,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-11-25T10:33:15,648 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192814 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-25T10:33:15,648 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files)
2024-11-25T10:33:15,648 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.
2024-11-25T10:33:15,648 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d345fecec4a24eaca5170c40d956726c, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4f28a79482c642bb882fa39b0a28f1ef, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/399489dc5ff24c73b3d8526c51ce7ec2] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=188.3 K 2024-11-25T10:33:15,649 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting d345fecec4a24eaca5170c40d956726c, keycount=138, bloomtype=ROW, size=151.2 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732530773328 2024-11-25T10:33:15,649 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4f28a79482c642bb882fa39b0a28f1ef, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732530793585 2024-11-25T10:33:15,650 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 399489dc5ff24c73b3d8526c51ce7ec2, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732530793610 2024-11-25T10:33:15,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/9e1058a1fa9e4999acf3205852db4b68 is 1080, key is row0226/info:/1732530795625/Put/seqid=0 2024-11-25T10:33:15,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741873_1049 (size=20092) 2024-11-25T10:33:15,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741873_1049 (size=20092) 2024-11-25T10:33:15,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/9e1058a1fa9e4999acf3205852db4b68 2024-11-25T10:33:15,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:15,666 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#87 average throughput is 56.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:33:15,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/9e1058a1fa9e4999acf3205852db4b68 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9e1058a1fa9e4999acf3205852db4b68 2024-11-25T10:33:15,667 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/80582365ac1e469fba9cbc69536e673f is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:33:15,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741874_1050 (size=182960) 2024-11-25T10:33:15,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9e1058a1fa9e4999acf3205852db4b68, entries=14, sequenceid=312, filesize=19.6 K 2024-11-25T10:33:15,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741874_1050 (size=182960) 2024-11-25T10:33:15,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 156ba42f3cf44382c1e5d9b730cfb315 in 25ms, sequenceid=312, compaction requested=false 2024-11-25T10:33:15,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:15,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40685 {}] regionserver.HRegion(8855): Flush requested on 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:15,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-25T10:33:15,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/8eff43d5460844fc9c433c0020378159 is 1080, key is row0240/info:/1732530795648/Put/seqid=0 2024-11-25T10:33:15,678 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/80582365ac1e469fba9cbc69536e673f as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/80582365ac1e469fba9cbc69536e673f 2024-11-25T10:33:15,685 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into 
80582365ac1e469fba9cbc69536e673f(size=178.7 K), total size for store is 198.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:33:15,685 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:15,685 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530795647; duration=0sec 2024-11-25T10:33:15,685 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:15,685 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:33:15,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741875_1051 (size=17918) 2024-11-25T10:33:15,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741875_1051 (size=17918) 2024-11-25T10:33:15,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/8eff43d5460844fc9c433c0020378159 2024-11-25T10:33:15,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/8eff43d5460844fc9c433c0020378159 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8eff43d5460844fc9c433c0020378159 2024-11-25T10:33:15,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8eff43d5460844fc9c433c0020378159, entries=12, sequenceid=327, filesize=17.5 K 2024-11-25T10:33:15,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for 156ba42f3cf44382c1e5d9b730cfb315 in 21ms, sequenceid=327, compaction requested=true 2024-11-25T10:33:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:15,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 156ba42f3cf44382c1e5d9b730cfb315:info, priority=-2147483648, current under compaction store size is 1 2024-11-25T10:33:15,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:15,696 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-25T10:33:15,696 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 220970 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-25T10:33:15,697 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1541): 156ba42f3cf44382c1e5d9b730cfb315/info is initiating minor compaction (all files) 2024-11-25T10:33:15,697 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 156ba42f3cf44382c1e5d9b730cfb315/info in TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:33:15,697 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/80582365ac1e469fba9cbc69536e673f, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9e1058a1fa9e4999acf3205852db4b68, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8eff43d5460844fc9c433c0020378159] into tmpdir=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp, totalSize=215.8 K 2024-11-25T10:33:15,697 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 80582365ac1e469fba9cbc69536e673f, keycount=164, bloomtype=ROW, size=178.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732530773328 2024-11-25T10:33:15,697 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e1058a1fa9e4999acf3205852db4b68, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732530795625 2024-11-25T10:33:15,698 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8eff43d5460844fc9c433c0020378159, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732530795648 2024-11-25T10:33:15,708 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 156ba42f3cf44382c1e5d9b730cfb315#info#compaction#89 average throughput is 64.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-25T10:33:15,708 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/01b43cb79fef4c9aa2a04b792c8c30b2 is 1080, key is row0062/info:/1732530773328/Put/seqid=0 2024-11-25T10:33:15,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741876_1052 (size=211193) 2024-11-25T10:33:15,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741876_1052 (size=211193) 2024-11-25T10:33:15,716 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/01b43cb79fef4c9aa2a04b792c8c30b2 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/01b43cb79fef4c9aa2a04b792c8c30b2 2024-11-25T10:33:15,721 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 156ba42f3cf44382c1e5d9b730cfb315/info of 156ba42f3cf44382c1e5d9b730cfb315 into 01b43cb79fef4c9aa2a04b792c8c30b2(size=206.2 K), total size for store is 206.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-25T10:33:15,722 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:15,722 INFO [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315., storeName=156ba42f3cf44382c1e5d9b730cfb315/info, priority=13, startTime=1732530795695; duration=0sec 2024-11-25T10:33:15,722 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-25T10:33:15,722 DEBUG [RS:0;e2fd862922a0:40685-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 156ba42f3cf44382c1e5d9b730cfb315:info 2024-11-25T10:33:15,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:15,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:16,025 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-11-25T10:33:16,664 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:16,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:16,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-25T10:33:17,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-25T10:33:17,682 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-25T10:33:17,683 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C40685%2C1732530750258.1732530797683 2024-11-25T10:33:17,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,689 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,689 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,689 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,689 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,690 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530750637 with entries=314, filesize=308.87 KB; new WAL /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530797683 2024-11-25T10:33:17,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40965:40965),(127.0.0.1/127.0.0.1:32779:32779)] 2024-11-25T10:33:17,691 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530750637 is not closed yet, will try archiving it next time 2024-11-25T10:33:17,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741833_1009 (size=316287) 2024-11-25T10:33:17,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741833_1009 (size=316287) 2024-11-25T10:33:17,695 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-25T10:33:17,698 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/info/75f41d31df554eec9055716c3ccda3a2 is 186, key is TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd./info:regioninfo/1732530774032/Put/seqid=0 2024-11-25T10:33:17,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741878_1054 (size=6153) 2024-11-25T10:33:17,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741878_1054 (size=6153) 2024-11-25T10:33:17,703 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/info/75f41d31df554eec9055716c3ccda3a2 2024-11-25T10:33:17,707 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/.tmp/info/75f41d31df554eec9055716c3ccda3a2 as 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/info/75f41d31df554eec9055716c3ccda3a2 2024-11-25T10:33:17,712 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/info/75f41d31df554eec9055716c3ccda3a2, entries=5, sequenceid=21, filesize=6.0 K 2024-11-25T10:33:17,713 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 18ms, sequenceid=21, compaction requested=false 2024-11-25T10:33:17,713 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-25T10:33:17,714 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 40d83bebc06ac5292c7cdc20116583fd: 2024-11-25T10:33:17,714 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 156ba42f3cf44382c1e5d9b730cfb315 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-25T10:33:17,718 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/98132f7856e6429e81b834828d5e7fed is 1080, key is row0252/info:/1732530795674/Put/seqid=0 2024-11-25T10:33:17,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741879_1055 (size=10357) 2024-11-25T10:33:17,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741879_1055 (size=10357) 2024-11-25T10:33:17,722 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/98132f7856e6429e81b834828d5e7fed 2024-11-25T10:33:17,727 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/.tmp/info/98132f7856e6429e81b834828d5e7fed as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/98132f7856e6429e81b834828d5e7fed 2024-11-25T10:33:17,731 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/98132f7856e6429e81b834828d5e7fed, entries=5, sequenceid=337, filesize=10.1 K 2024-11-25T10:33:17,732 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 156ba42f3cf44382c1e5d9b730cfb315 in 18ms, sequenceid=337, compaction requested=false 2024-11-25T10:33:17,732 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 156ba42f3cf44382c1e5d9b730cfb315: 2024-11-25T10:33:17,732 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
e2fd862922a0%2C40685%2C1732530750258.1732530797732 2024-11-25T10:33:17,738 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,738 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,738 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,738 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,738 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,738 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530797683 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530797732 2024-11-25T10:33:17,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741877_1053 (size=731) 2024-11-25T10:33:17,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741877_1053 (size=731) 2024-11-25T10:33:17,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:17,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-25T10:33:17,743 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530750637 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/oldWALs/e2fd862922a0%2C40685%2C1732530750258.1732530750637 2024-11-25T10:33:17,744 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32779:32779),(127.0.0.1/127.0.0.1:40965:40965)] 2024-11-25T10:33:17,744 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-25T10:33:17,744 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/WALs/e2fd862922a0,40685,1732530750258/e2fd862922a0%2C40685%2C1732530750258.1732530797683 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/oldWALs/e2fd862922a0%2C40685%2C1732530750258.1732530797683 2024-11-25T10:33:17,745 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T10:33:17,745 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T10:33:17,745 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:33:17,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:17,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:17,745 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T10:33:17,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T10:33:17,746 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=386419941, stopped=false 2024-11-25T10:33:17,746 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2fd862922a0,34293,1732530750208 2024-11-25T10:33:17,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:33:17,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:33:17,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:17,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:17,747 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:33:17,747 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
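[Editor's note, not part of the captured log] The flush and WAL-roll entries above, together with the tearDown stack trace ending at HBaseTestingUtil.shutdownMiniCluster, record the sequence AbstractTestLogRolling exercises. A minimal illustrative sketch of that sequence follows. It assumes the HBaseTestingUtil harness and the client Admin API named in the log; the createTable/getAdmin calls, the placeholder row key, and the empty column qualifier are assumptions for illustration, not taken from the log.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LogRollingSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  public static void main(String[] args) throws Exception {
    TEST_UTIL.startMiniCluster();                          // mini HDFS + ZooKeeper + HBase
    TableName name = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Table table = TEST_UTIL.createTable(name, "info");
         Admin admin = TEST_UTIL.getAdmin()) {
      // Write a row, then force the memstore flush that produces the
      // "Finished flush of dataSize ..." entries seen above.
      table.put(new Put(Bytes.toBytes("row0252"))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value")));
      admin.flush(name);
      // Roll the WAL on each region server, which yields the
      // "Rolled WAL ... new WAL ..." entries.
      for (ServerName rs : admin.getRegionServers()) {
        admin.rollWALWriter(rs);
      }
    } finally {
      // Same teardown path as AbstractTestLogRolling.tearDown in the stack trace above:
      // close the cluster connection and shut down the mini cluster.
      TEST_UTIL.shutdownMiniCluster();
    }
  }
}
```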
2024-11-25T10:33:17,748 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:33:17,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:17,748 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,40685,1732530750258' ***** 2024-11-25T10:33:17,748 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T10:33:17,748 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:33:17,748 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:33:17,748 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T10:33:17,748 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(3091): Received CLOSE for 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(3091): Received CLOSE for 156ba42f3cf44382c1e5d9b730cfb315 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,40685,1732530750258 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:33:17,749 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 40d83bebc06ac5292c7cdc20116583fd, disabling compactions & flushes 2024-11-25T10:33:17,749 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2fd862922a0:40685. 2024-11-25T10:33:17,749 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 
2024-11-25T10:33:17,749 DEBUG [RS:0;e2fd862922a0:40685 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:33:17,749 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. after waiting 0 ms 2024-11-25T10:33:17,749 DEBUG [RS:0;e2fd862922a0:40685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:17,749 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T10:33:17,749 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T10:33:17,750 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-25T10:33:17,750 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 40d83bebc06ac5292c7cdc20116583fd=TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd., 156ba42f3cf44382c1e5d9b730cfb315=TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.} 2024-11-25T10:33:17,750 DEBUG [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1351): Waiting on 156ba42f3cf44382c1e5d9b730cfb315, 1588230740, 40d83bebc06ac5292c7cdc20116583fd 2024-11-25T10:33:17,750 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:33:17,750 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:33:17,750 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:33:17,750 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:33:17,750 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:33:17,750 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f->hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba-bottom] to archive 2024-11-25T10:33:17,751 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-25T10:33:17,752 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:33:17,753 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e2fd862922a0:34293 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-25T10:33:17,753 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-25T10:33:17,758 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/40d83bebc06ac5292c7cdc20116583fd/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-25T10:33:17,758 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-25T10:33:17,759 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 40d83bebc06ac5292c7cdc20116583fd: Waiting for close lock at 1732530797749Running coprocessor pre-close hooks at 1732530797749Disabling compacts and flushes for region at 1732530797749Disabling writes for close at 1732530797749Writing region close event to WAL at 1732530797755 (+6 ms)Running coprocessor post-close hooks at 1732530797759 (+4 ms)Closed at 1732530797759 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:33:17,759 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530797750Running coprocessor pre-close hooks at 1732530797750Disabling compacts and flushes for region at 1732530797750Disabling writes for close at 1732530797750Writing region close event to WAL at 1732530797756 (+6 ms)Running coprocessor post-close hooks at 1732530797759 (+3 ms)Closed at 1732530797759 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732530773386.40d83bebc06ac5292c7cdc20116583fd. 
2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 156ba42f3cf44382c1e5d9b730cfb315, disabling compactions & flushes 2024-11-25T10:33:17,759 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. after waiting 0 ms 2024-11-25T10:33:17,759 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:33:17,760 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f->hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/db362bd9276f0f7b0aea2f60885c6e5f/info/d5e744653ad34d6eaedff838c54889ba-top, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4761feddc7f244f291fd851d204abef0, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/f8ddf316d6eb43318afbbeb41ace4446, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/5690590538a246749d5adcdaa09dc3be, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/184d0d0bec1b49cda19f4df3df54ef23, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4589cafdfc6b45fc824a2533b9d461e5, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/b26222678c5f42b0ae0a89ba853ba978, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/40377952031f46d0bf85607746ee4d7c, 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/0638809b77904d67925fef0a094c9e8f, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/19e6208bdd4b41b08246ed2dca6ce4c2, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/c2d633e1d0474929b19f0524d867759e, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/2cb7d1010065470a95324a7eb3fec0a1, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9785a387a72c4e38a234680c9e2e9471, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d05f963749444e87b4341cd7c8360e79, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/a01f210bd2164898b9e963adc2bb75dd, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/84faca81139045549446e01c4b33e0f4, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/7e66637024f540a299e96cb49a8a17dc, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8743966c06ad49cdb769ba0dd485f097, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d345fecec4a24eaca5170c40d956726c, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/26fb4200d27a4b2fa8566ec3aea8295b, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4f28a79482c642bb882fa39b0a28f1ef, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/80582365ac1e469fba9cbc69536e673f, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/399489dc5ff24c73b3d8526c51ce7ec2, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9e1058a1fa9e4999acf3205852db4b68, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8eff43d5460844fc9c433c0020378159] to archive 2024-11-25T10:33:17,761 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-25T10:33:17,762 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d5e744653ad34d6eaedff838c54889ba.db362bd9276f0f7b0aea2f60885c6e5f 2024-11-25T10:33:17,763 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4761feddc7f244f291fd851d204abef0 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4761feddc7f244f291fd851d204abef0 2024-11-25T10:33:17,764 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/f8ddf316d6eb43318afbbeb41ace4446 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/f8ddf316d6eb43318afbbeb41ace4446 2024-11-25T10:33:17,765 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/5690590538a246749d5adcdaa09dc3be to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/5690590538a246749d5adcdaa09dc3be 2024-11-25T10:33:17,767 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/184d0d0bec1b49cda19f4df3df54ef23 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/184d0d0bec1b49cda19f4df3df54ef23 2024-11-25T10:33:17,768 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4589cafdfc6b45fc824a2533b9d461e5 to 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4589cafdfc6b45fc824a2533b9d461e5 2024-11-25T10:33:17,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/b26222678c5f42b0ae0a89ba853ba978 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/b26222678c5f42b0ae0a89ba853ba978 2024-11-25T10:33:17,770 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/40377952031f46d0bf85607746ee4d7c to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/40377952031f46d0bf85607746ee4d7c 2024-11-25T10:33:17,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/0638809b77904d67925fef0a094c9e8f to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/0638809b77904d67925fef0a094c9e8f 2024-11-25T10:33:17,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/19e6208bdd4b41b08246ed2dca6ce4c2 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/19e6208bdd4b41b08246ed2dca6ce4c2 2024-11-25T10:33:17,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/c2d633e1d0474929b19f0524d867759e to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/c2d633e1d0474929b19f0524d867759e 2024-11-25T10:33:17,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/2cb7d1010065470a95324a7eb3fec0a1 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/2cb7d1010065470a95324a7eb3fec0a1 2024-11-25T10:33:17,776 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9785a387a72c4e38a234680c9e2e9471 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9785a387a72c4e38a234680c9e2e9471 2024-11-25T10:33:17,777 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d05f963749444e87b4341cd7c8360e79 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d05f963749444e87b4341cd7c8360e79 2024-11-25T10:33:17,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/a01f210bd2164898b9e963adc2bb75dd to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/a01f210bd2164898b9e963adc2bb75dd 2024-11-25T10:33:17,779 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/84faca81139045549446e01c4b33e0f4 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/84faca81139045549446e01c4b33e0f4 2024-11-25T10:33:17,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/7e66637024f540a299e96cb49a8a17dc to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/7e66637024f540a299e96cb49a8a17dc 2024-11-25T10:33:17,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8743966c06ad49cdb769ba0dd485f097 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8743966c06ad49cdb769ba0dd485f097 2024-11-25T10:33:17,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d345fecec4a24eaca5170c40d956726c to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/d345fecec4a24eaca5170c40d956726c 2024-11-25T10:33:17,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/26fb4200d27a4b2fa8566ec3aea8295b to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/26fb4200d27a4b2fa8566ec3aea8295b 2024-11-25T10:33:17,784 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4f28a79482c642bb882fa39b0a28f1ef to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/4f28a79482c642bb882fa39b0a28f1ef 2024-11-25T10:33:17,785 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/80582365ac1e469fba9cbc69536e673f to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/80582365ac1e469fba9cbc69536e673f 2024-11-25T10:33:17,786 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/399489dc5ff24c73b3d8526c51ce7ec2 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/399489dc5ff24c73b3d8526c51ce7ec2 2024-11-25T10:33:17,788 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9e1058a1fa9e4999acf3205852db4b68 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/9e1058a1fa9e4999acf3205852db4b68 2024-11-25T10:33:17,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8eff43d5460844fc9c433c0020378159 to hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/archive/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/info/8eff43d5460844fc9c433c0020378159 2024-11-25T10:33:17,789 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4761feddc7f244f291fd851d204abef0=8260, f8ddf316d6eb43318afbbeb41ace4446=12509, 5690590538a246749d5adcdaa09dc3be=32086, 184d0d0bec1b49cda19f4df3df54ef23=21141, 4589cafdfc6b45fc824a2533b9d461e5=16818, b26222678c5f42b0ae0a89ba853ba978=59168, 40377952031f46d0bf85607746ee4d7c=20078, 0638809b77904d67925fef0a094c9e8f=19000, 19e6208bdd4b41b08246ed2dca6ce4c2=80967, c2d633e1d0474929b19f0524d867759e=12516, 2cb7d1010065470a95324a7eb3fec0a1=20078, 9785a387a72c4e38a234680c9e2e9471=109105, d05f963749444e87b4341cd7c8360e79=17906, a01f210bd2164898b9e963adc2bb75dd=12516, 84faca81139045549446e01c4b33e0f4=131977, 7e66637024f540a299e96cb49a8a17dc=20078, 8743966c06ad49cdb769ba0dd485f097=20078, d345fecec4a24eaca5170c40d956726c=154800, 26fb4200d27a4b2fa8566ec3aea8295b=12522, 4f28a79482c642bb882fa39b0a28f1ef=23333, 80582365ac1e469fba9cbc69536e673f=182960, 399489dc5ff24c73b3d8526c51ce7ec2=14681, 9e1058a1fa9e4999acf3205852db4b68=20092, 8eff43d5460844fc9c433c0020378159=17918] 2024-11-25T10:33:17,792 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/data/default/TestLogRolling-testLogRolling/156ba42f3cf44382c1e5d9b730cfb315/recovered.edits/340.seqid, newMaxSeqId=340, maxSeqId=85 2024-11-25T10:33:17,793 INFO [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 
2024-11-25T10:33:17,793 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 156ba42f3cf44382c1e5d9b730cfb315: Waiting for close lock at 1732530797759Running coprocessor pre-close hooks at 1732530797759Disabling compacts and flushes for region at 1732530797759Disabling writes for close at 1732530797759Writing region close event to WAL at 1732530797789 (+30 ms)Running coprocessor post-close hooks at 1732530797793 (+4 ms)Closed at 1732530797793 2024-11-25T10:33:17,793 DEBUG [RS_CLOSE_REGION-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732530773386.156ba42f3cf44382c1e5d9b730cfb315. 2024-11-25T10:33:17,950 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,40685,1732530750258; all regions closed. 2024-11-25T10:33:17,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741834_1010 (size=8107) 2024-11-25T10:33:17,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741834_1010 (size=8107) 2024-11-25T10:33:17,955 DEBUG [RS:0;e2fd862922a0:40685 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/oldWALs 2024-11-25T10:33:17,955 INFO [RS:0;e2fd862922a0:40685 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C40685%2C1732530750258.meta:.meta(num 1732530751007) 2024-11-25T10:33:17,955 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,955 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,955 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,956 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,956 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:17,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741880_1056 (size=778) 2024-11-25T10:33:17,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741880_1056 (size=778) 2024-11-25T10:33:17,959 DEBUG [RS:0;e2fd862922a0:40685 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/oldWALs 2024-11-25T10:33:17,959 INFO [RS:0;e2fd862922a0:40685 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C40685%2C1732530750258:(num 1732530797732) 2024-11-25T10:33:17,959 DEBUG [RS:0;e2fd862922a0:40685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:17,959 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:33:17,959 INFO [RS:0;e2fd862922a0:40685 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:33:17,960 INFO [RS:0;e2fd862922a0:40685 {}] hbase.ChoreService(370): Chore service for: 
regionserver/e2fd862922a0:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T10:33:17,960 INFO [RS:0;e2fd862922a0:40685 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:33:17,960 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:33:17,960 INFO [RS:0;e2fd862922a0:40685 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40685 2024-11-25T10:33:17,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:33:17,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,40685,1732530750258 2024-11-25T10:33:17,962 INFO [RS:0;e2fd862922a0:40685 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:33:17,962 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,40685,1732530750258] 2024-11-25T10:33:17,965 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,40685,1732530750258 already deleted, retry=false 2024-11-25T10:33:17,965 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,40685,1732530750258 expired; onlineServers=0 2024-11-25T10:33:17,965 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2fd862922a0,34293,1732530750208' ***** 2024-11-25T10:33:17,965 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T10:33:17,965 INFO [M:0;e2fd862922a0:34293 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:33:17,965 INFO [M:0;e2fd862922a0:34293 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:33:17,965 DEBUG [M:0;e2fd862922a0:34293 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T10:33:17,965 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-25T10:33:17,965 DEBUG [M:0;e2fd862922a0:34293 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T10:33:17,965 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530750409 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530750409,5,FailOnTimeoutGroup] 2024-11-25T10:33:17,965 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530750410 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530750410,5,FailOnTimeoutGroup] 2024-11-25T10:33:17,965 INFO [M:0;e2fd862922a0:34293 {}] hbase.ChoreService(370): Chore service for: master/e2fd862922a0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T10:33:17,965 INFO [M:0;e2fd862922a0:34293 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:33:17,966 DEBUG [M:0;e2fd862922a0:34293 {}] master.HMaster(1795): Stopping service threads 2024-11-25T10:33:17,966 INFO [M:0;e2fd862922a0:34293 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T10:33:17,966 INFO [M:0;e2fd862922a0:34293 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:33:17,966 ERROR [M:0;e2fd862922a0:34293 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:40299,5,PEWorkerGroup] 2024-11-25T10:33:17,966 INFO [M:0;e2fd862922a0:34293 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T10:33:17,966 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-25T10:33:17,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T10:33:17,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:17,967 DEBUG [M:0;e2fd862922a0:34293 {}] zookeeper.ZKUtil(347): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T10:33:17,967 WARN [M:0;e2fd862922a0:34293 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T10:33:17,967 INFO [M:0;e2fd862922a0:34293 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/.lastflushedseqids 2024-11-25T10:33:17,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741881_1057 (size=228) 2024-11-25T10:33:17,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741881_1057 (size=228) 2024-11-25T10:33:17,973 INFO [M:0;e2fd862922a0:34293 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T10:33:17,973 INFO [M:0;e2fd862922a0:34293 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T10:33:17,973 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:33:17,973 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:17,973 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:17,973 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:33:17,973 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-25T10:33:17,973 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-25T10:33:17,989 DEBUG [M:0;e2fd862922a0:34293 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d63f75701a5400895d6d58f1dd3e1cb is 82, key is hbase:meta,,1/info:regioninfo/1732530751040/Put/seqid=0 2024-11-25T10:33:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741882_1058 (size=5672) 2024-11-25T10:33:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741882_1058 (size=5672) 2024-11-25T10:33:17,994 INFO [M:0;e2fd862922a0:34293 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d63f75701a5400895d6d58f1dd3e1cb 2024-11-25T10:33:18,011 DEBUG [M:0;e2fd862922a0:34293 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9949aceec4d459cb3297020a9728f3d is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732530751448/Put/seqid=0 2024-11-25T10:33:18,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741883_1059 (size=7090) 2024-11-25T10:33:18,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741883_1059 (size=7090) 2024-11-25T10:33:18,016 INFO [M:0;e2fd862922a0:34293 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9949aceec4d459cb3297020a9728f3d 2024-11-25T10:33:18,019 INFO [M:0;e2fd862922a0:34293 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a9949aceec4d459cb3297020a9728f3d 2024-11-25T10:33:18,033 DEBUG [M:0;e2fd862922a0:34293 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f523ff275ef4925b634bc0ea1b15261 is 69, key is e2fd862922a0,40685,1732530750258/rs:state/1732530750493/Put/seqid=0 2024-11-25T10:33:18,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741884_1060 (size=5156) 2024-11-25T10:33:18,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741884_1060 (size=5156) 2024-11-25T10:33:18,038 INFO [M:0;e2fd862922a0:34293 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f523ff275ef4925b634bc0ea1b15261 2024-11-25T10:33:18,055 DEBUG [M:0;e2fd862922a0:34293 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8e906a83a6264d35bc24a53af73a828a is 52, key is load_balancer_on/state:d/1732530751082/Put/seqid=0 2024-11-25T10:33:18,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741885_1061 (size=5056) 2024-11-25T10:33:18,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741885_1061 (size=5056) 2024-11-25T10:33:18,060 INFO [M:0;e2fd862922a0:34293 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8e906a83a6264d35bc24a53af73a828a 2024-11-25T10:33:18,063 INFO [RS:0;e2fd862922a0:40685 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:33:18,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:33:18,063 INFO [RS:0;e2fd862922a0:40685 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,40685,1732530750258; zookeeper connection closed. 
2024-11-25T10:33:18,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40685-0x1014eaf2d820001, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:33:18,064 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68cbea13 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68cbea13 2024-11-25T10:33:18,064 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-25T10:33:18,065 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8d63f75701a5400895d6d58f1dd3e1cb as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8d63f75701a5400895d6d58f1dd3e1cb 2024-11-25T10:33:18,068 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8d63f75701a5400895d6d58f1dd3e1cb, entries=8, sequenceid=125, filesize=5.5 K 2024-11-25T10:33:18,069 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a9949aceec4d459cb3297020a9728f3d as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a9949aceec4d459cb3297020a9728f3d 2024-11-25T10:33:18,073 INFO [M:0;e2fd862922a0:34293 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a9949aceec4d459cb3297020a9728f3d 2024-11-25T10:33:18,073 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a9949aceec4d459cb3297020a9728f3d, entries=13, sequenceid=125, filesize=6.9 K 2024-11-25T10:33:18,074 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5f523ff275ef4925b634bc0ea1b15261 as hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5f523ff275ef4925b634bc0ea1b15261 2024-11-25T10:33:18,078 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5f523ff275ef4925b634bc0ea1b15261, entries=1, sequenceid=125, filesize=5.0 K 2024-11-25T10:33:18,079 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8e906a83a6264d35bc24a53af73a828a as 
hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8e906a83a6264d35bc24a53af73a828a 2024-11-25T10:33:18,083 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40299/user/jenkins/test-data/12b7ced1-076e-dde9-e3c0-c56c6762408d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8e906a83a6264d35bc24a53af73a828a, entries=1, sequenceid=125, filesize=4.9 K 2024-11-25T10:33:18,084 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false 2024-11-25T10:33:18,085 INFO [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:18,085 DEBUG [M:0;e2fd862922a0:34293 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530797973Disabling compacts and flushes for region at 1732530797973Disabling writes for close at 1732530797973Obtaining lock to block concurrent updates at 1732530797973Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732530797973Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1732530797974 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732530797974Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732530797974Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732530797989 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732530797989Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732530797997 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732530798011 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732530798011Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732530798019 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732530798033 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732530798033Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732530798041 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732530798055 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732530798055Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49a98c7: reopening flushed file at 1732530798064 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69ff9fe6: reopening flushed file at 1732530798069 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e949dac: reopening flushed file at 1732530798073 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fba13cc: reopening flushed file at 1732530798078 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=125, compaction requested=false at 1732530798084 (+6 ms)Writing region close event to WAL at 1732530798085 (+1 ms)Closed at 1732530798085 2024-11-25T10:33:18,086 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:18,086 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:18,086 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:18,086 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:18,086 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:18,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32869 is added to blk_1073741830_1006 (size=61320) 2024-11-25T10:33:18,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44101 is added to blk_1073741830_1006 (size=61320) 2024-11-25T10:33:18,088 INFO [M:0;e2fd862922a0:34293 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-25T10:33:18,088 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-25T10:33:18,088 INFO [M:0;e2fd862922a0:34293 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34293 2024-11-25T10:33:18,089 INFO [M:0;e2fd862922a0:34293 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:33:18,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:33:18,190 INFO [M:0;e2fd862922a0:34293 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-25T10:33:18,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34293-0x1014eaf2d820000, quorum=127.0.0.1:58511, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-25T10:33:18,193 WARN [BP-1457786347-172.17.0.2-1732530749544 heartbeating to localhost/127.0.0.1:40299 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1457786347-172.17.0.2-1732530749544 (Datanode Uuid 2dc40fde-6e02-4684-994c-c63536d17900) service to localhost/127.0.0.1:40299 2024-11-25T10:33:18,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data3/current/BP-1457786347-172.17.0.2-1732530749544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:33:18,193 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3868302b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:33:18,193 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data4/current/BP-1457786347-172.17.0.2-1732530749544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:33:18,194 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76385d85{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:33:18,194 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:33:18,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@551592b1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:33:18,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43fb00bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir/,STOPPED} 2024-11-25T10:33:18,196 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:33:18,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@516e643a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:33:18,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27e7702e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:33:18,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:33:18,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4181d37d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:33:18,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4300eaa8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir/,STOPPED} 2024-11-25T10:33:18,202 WARN [BP-1457786347-172.17.0.2-1732530749544 heartbeating to localhost/127.0.0.1:40299 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-25T10:33:18,202 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-25T10:33:18,202 WARN [BP-1457786347-172.17.0.2-1732530749544 heartbeating to localhost/127.0.0.1:40299 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1457786347-172.17.0.2-1732530749544 (Datanode Uuid 5ccaba14-c692-4edf-86ca-2ab234956338) service to localhost/127.0.0.1:40299 2024-11-25T10:33:18,202 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-25T10:33:18,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data1/current/BP-1457786347-172.17.0.2-1732530749544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:33:18,203 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/cluster_280f76f3-9441-dd7e-295f-f768874e4966/data/data2/current/BP-1457786347-172.17.0.2-1732530749544 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-25T10:33:18,203 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-25T10:33:18,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@647c0ca7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:33:18,209 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@458b9f0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-25T10:33:18,209 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-25T10:33:18,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7142f9c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-25T10:33:18,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18de480b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir/,STOPPED} 2024-11-25T10:33:18,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-25T10:33:18,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-25T10:33:18,255 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 207) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40299 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:40299 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40299 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40299 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40299 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40299 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=83 (was 47) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1490 (was 1535) 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=83, ProcessCount=11, AvailableMemoryMB=1491 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.log.dir so I do NOT create it in target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/253470b1-d071-61be-80c0-b03a2d60625f/hadoop.tmp.dir so I do NOT create it in target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec, deleteOnExit=true 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/test.cache.data in system properties and HBase conf 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.tmp.dir in system properties and HBase conf 2024-11-25T10:33:18,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.log.dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-25T10:33:18,264 DEBUG [Time-limited test {}] fs.HFileSystem(310): 
The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/nfs.dump.dir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/java.io.tmpdir in system properties and HBase conf 2024-11-25T10:33:18,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-25T10:33:18,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-25T10:33:18,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-25T10:33:18,277 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:33:18,333 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:33:18,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:33:18,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:33:18,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:33:18,338 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:33:18,339 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:33:18,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@442ec576{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:33:18,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3287f588{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:33:18,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3637fc70{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/java.io.tmpdir/jetty-localhost-35829-hadoop-hdfs-3_4_1-tests_jar-_-any-13558245062134581159/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-25T10:33:18,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6dda9ea2{HTTP/1.1, (http/1.1)}{localhost:35829} 2024-11-25T10:33:18,454 INFO [Time-limited test {}] server.Server(415): Started @289966ms 2024-11-25T10:33:18,466 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-25T10:33:18,506 INFO [regionserver/e2fd862922a0:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:33:18,531 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:33:18,533 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:33:18,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:33:18,534 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:33:18,534 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-25T10:33:18,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35a6030c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:33:18,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bfccd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:33:18,648 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b825638{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/java.io.tmpdir/jetty-localhost-34789-hadoop-hdfs-3_4_1-tests_jar-_-any-10967991661212669911/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:33:18,649 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fcb3743{HTTP/1.1, (http/1.1)}{localhost:34789} 2024-11-25T10:33:18,649 INFO [Time-limited test {}] server.Server(415): Started @290161ms 2024-11-25T10:33:18,650 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:33:18,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:18,677 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-25T10:33:18,679 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-25T10:33:18,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-25T10:33:18,679 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-25T10:33:18,679 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-25T10:33:18,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@536a5c19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.log.dir/,AVAILABLE} 2024-11-25T10:33:18,680 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@352a3917{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-25T10:33:18,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:18,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:18,764 WARN [Thread-2474 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data2/current/BP-1149931814-172.17.0.2-1732530798283/current, will proceed with Du for space computation calculation, 2024-11-25T10:33:18,764 WARN [Thread-2473 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data1/current/BP-1149931814-172.17.0.2-1732530798283/current, will proceed with Du for space computation calculation, 2024-11-25T10:33:18,781 WARN [Thread-2452 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-25T10:33:18,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74341b42ce9f407b with lease ID 0x53356b2761ac97da: Processing first storage report for DS-fcad3977-0103-4bfc-bc02-bdf600aac478 from datanode DatanodeRegistration(127.0.0.1:43409, datanodeUuid=f6bac507-d39b-407a-98d5-d3eb389b7d89, infoPort=43907, infoSecurePort=0, ipcPort=39077, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283) 2024-11-25T10:33:18,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74341b42ce9f407b with lease ID 0x53356b2761ac97da: from storage DS-fcad3977-0103-4bfc-bc02-bdf600aac478 node DatanodeRegistration(127.0.0.1:43409, datanodeUuid=f6bac507-d39b-407a-98d5-d3eb389b7d89, infoPort=43907, infoSecurePort=0, ipcPort=39077, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-25T10:33:18,784 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x74341b42ce9f407b with lease ID 0x53356b2761ac97da: Processing first storage report for DS-e1500fd7-c488-426d-8f9a-4016ebc946a5 from datanode DatanodeRegistration(127.0.0.1:43409, datanodeUuid=f6bac507-d39b-407a-98d5-d3eb389b7d89, infoPort=43907, infoSecurePort=0, ipcPort=39077, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283) 2024-11-25T10:33:18,784 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x74341b42ce9f407b with lease ID 0x53356b2761ac97da: from storage DS-e1500fd7-c488-426d-8f9a-4016ebc946a5 node DatanodeRegistration(127.0.0.1:43409, datanodeUuid=f6bac507-d39b-407a-98d5-d3eb389b7d89, infoPort=43907, infoSecurePort=0, ipcPort=39077, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:33:18,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@45e68726{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/java.io.tmpdir/jetty-localhost-44413-hadoop-hdfs-3_4_1-tests_jar-_-any-6532801988299604110/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-25T10:33:18,802 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e1cda33{HTTP/1.1, (http/1.1)}{localhost:44413} 2024-11-25T10:33:18,802 INFO [Time-limited test {}] server.Server(415): Started @290314ms 2024-11-25T10:33:18,803 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-25T10:33:18,987 WARN [Thread-2500 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data4/current/BP-1149931814-172.17.0.2-1732530798283/current, will proceed with Du for space computation calculation, 2024-11-25T10:33:18,987 WARN [Thread-2499 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data3/current/BP-1149931814-172.17.0.2-1732530798283/current, will proceed with Du for space computation calculation, 2024-11-25T10:33:19,011 WARN [Thread-2488 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-25T10:33:19,013 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b1b1f2bb2be6d34 with lease ID 0x53356b2761ac97db: Processing first storage report for DS-d4555a3d-5ed5-40c8-9328-0e30fd4032f9 from datanode DatanodeRegistration(127.0.0.1:39441, datanodeUuid=dac5dcaf-2be4-45a1-b4f7-89d3f3f285cf, infoPort=45723, infoSecurePort=0, ipcPort=34369, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283) 2024-11-25T10:33:19,013 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b1b1f2bb2be6d34 with lease ID 0x53356b2761ac97db: from storage DS-d4555a3d-5ed5-40c8-9328-0e30fd4032f9 node DatanodeRegistration(127.0.0.1:39441, datanodeUuid=dac5dcaf-2be4-45a1-b4f7-89d3f3f285cf, infoPort=45723, infoSecurePort=0, ipcPort=34369, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:33:19,013 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6b1b1f2bb2be6d34 with lease ID 0x53356b2761ac97db: Processing first storage report for DS-2252469c-aebe-40b3-b54c-9dbe5c988383 from datanode DatanodeRegistration(127.0.0.1:39441, datanodeUuid=dac5dcaf-2be4-45a1-b4f7-89d3f3f285cf, infoPort=45723, infoSecurePort=0, ipcPort=34369, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283) 2024-11-25T10:33:19,013 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6b1b1f2bb2be6d34 with lease ID 0x53356b2761ac97db: from storage DS-2252469c-aebe-40b3-b54c-9dbe5c988383 node DatanodeRegistration(127.0.0.1:39441, datanodeUuid=dac5dcaf-2be4-45a1-b4f7-89d3f3f285cf, infoPort=45723, infoSecurePort=0, ipcPort=34369, storageInfo=lv=-57;cid=testClusterID;nsid=433804272;c=1732530798283), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-25T10:33:19,024 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669 2024-11-25T10:33:19,026 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/zookeeper_0, clientPort=64684, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-25T10:33:19,027 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64684 2024-11-25T10:33:19,027 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,029 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:33:19,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741825_1001 (size=7) 2024-11-25T10:33:19,038 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d with version=8 2024-11-25T10:33:19,038 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:39029/user/jenkins/test-data/65e4efd5-4567-f20e-dfb7-9f387ffe4cb3/hbase-staging 2024-11-25T10:33:19,040 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:33:19,040 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:33:19,040 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:33:19,040 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:33:19,040 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:33:19,040 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:33:19,040 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-25T10:33:19,040 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:33:19,041 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44549 2024-11-25T10:33:19,042 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44549 connecting to ZooKeeper ensemble=127.0.0.1:64684 2024-11-25T10:33:19,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:445490x0, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:33:19,047 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44549-0x1014eafec410000 connected 2024-11-25T10:33:19,061 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,062 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:33:19,064 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d, hbase.cluster.distributed=false 2024-11-25T10:33:19,066 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:33:19,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44549 2024-11-25T10:33:19,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44549 2024-11-25T10:33:19,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44549 2024-11-25T10:33:19,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44549 2024-11-25T10:33:19,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44549 2024-11-25T10:33:19,082 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e2fd862922a0:0 server-side Connection retries=45 2024-11-25T10:33:19,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:33:19,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-25T10:33:19,082 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-25T10:33:19,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-25T10:33:19,082 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-25T10:33:19,082 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-25T10:33:19,082 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-25T10:33:19,083 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46131 2024-11-25T10:33:19,083 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46131 connecting to ZooKeeper ensemble=127.0.0.1:64684 2024-11-25T10:33:19,084 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:461310x0, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-25T10:33:19,089 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:461310x0, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:33:19,089 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46131-0x1014eafec410001 connected 2024-11-25T10:33:19,089 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-25T10:33:19,090 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-25T10:33:19,090 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-25T10:33:19,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-25T10:33:19,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46131 2024-11-25T10:33:19,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46131 2024-11-25T10:33:19,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46131 2024-11-25T10:33:19,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46131 2024-11-25T10:33:19,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46131 2024-11-25T10:33:19,104 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e2fd862922a0:44549 2024-11-25T10:33:19,104 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:33:19,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:33:19,106 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-25T10:33:19,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,109 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-25T10:33:19,109 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e2fd862922a0,44549,1732530799039 from backup master directory 2024-11-25T10:33:19,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:33:19,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-25T10:33:19,110 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
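The repeated "Set watcher on znode that does not yet exist" lines above come from HBase registering ZooKeeper watches on paths such as /hbase/running and /hbase/master before those znodes have been created. A minimal sketch of that pattern with the plain ZooKeeper client follows; the quorum string 127.0.0.1:64684 and the znode path are taken from the log, while everything else (class name, timeouts, printed messages) is assumed for illustration only.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Session-level watcher: receives connection-state events (SyncConnected, etc.).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64684", 30000,
        event -> System.out.println("session event: " + event.getState()));

    // Node-level watcher: fires once for the next event on the watched path.
    Watcher nodeWatcher = (WatchedEvent event) ->
        System.out.println("znode event: " + event.getType() + " on " + event.getPath());

    // exists() registers the watch whether or not /hbase/running is present yet,
    // so a later NodeCreated event (like the one logged above) still triggers it.
    if (zk.exists("/hbase/running", nodeWatcher) == null) {
      System.out.println("/hbase/running not there yet; watch registered anyway");
    }

    Thread.sleep(5_000); // keep the session open briefly so the watch can fire
    zk.close();
  }
}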
2024-11-25T10:33:19,110 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,114 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/hbase.id] with ID: 9835f977-1f8d-4cee-ae56-23dd579c13ed 2024-11-25T10:33:19,114 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/.tmp/hbase.id 2024-11-25T10:33:19,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:33:19,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741826_1002 (size=42) 2024-11-25T10:33:19,120 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/.tmp/hbase.id]:[hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/hbase.id] 2024-11-25T10:33:19,130 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,130 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-25T10:33:19,131 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
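The FSUtils records above show the cluster ID being written to .tmp/hbase.id and then moved to hbase.id so the final file appears atomically. A short sketch of that write-then-rename pattern with the Hadoop FileSystem API follows; the rootdir URI and the cluster ID string are copied from the log, while the class itself and its structure are only an illustration, not HBase's FSUtils code.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path rootDir = new Path("hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d");
    FileSystem fs = rootDir.getFileSystem(conf);

    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");

    // Write the ID to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("9835f977-1f8d-4cee-ae56-23dd579c13ed".getBytes(StandardCharsets.UTF_8));
    }
    // ...then rename it into place so readers never see a partially written hbase.id.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}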
2024-11-25T10:33:19,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:33:19,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741827_1003 (size=196) 2024-11-25T10:33:19,138 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-25T10:33:19,139 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-25T10:33:19,139 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:33:19,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:33:19,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741828_1004 (size=1189) 2024-11-25T10:33:19,146 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store 2024-11-25T10:33:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:33:19,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741829_1005 (size=34) 2024-11-25T10:33:19,153 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:33:19,153 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:33:19,153 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:19,153 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:19,153 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:33:19,153 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:19,153 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
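The "Create or load local region for table 'master:store'" record above lists four column families (info, proc, rs, state) with the settings printed there. Assuming the public HBase client builders, a rough sketch of declaring a descriptor with similar families is shown below; only the values that appear in the log are taken from it, and this is not the internal MasterRegion bootstrap code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
    System.out.println(desc);
  }
}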
2024-11-25T10:33:19,153 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732530799153Disabling compacts and flushes for region at 1732530799153Disabling writes for close at 1732530799153Writing region close event to WAL at 1732530799153Closed at 1732530799153 2024-11-25T10:33:19,154 WARN [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/.initializing 2024-11-25T10:33:19,154 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/WALs/e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,156 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C44549%2C1732530799039, suffix=, logDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/WALs/e2fd862922a0,44549,1732530799039, archiveDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/oldWALs, maxLogs=10 2024-11-25T10:33:19,157 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C44549%2C1732530799039.1732530799157 2024-11-25T10:33:19,161 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/WALs/e2fd862922a0,44549,1732530799039/e2fd862922a0%2C44549%2C1732530799039.1732530799157 2024-11-25T10:33:19,164 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45723:45723),(127.0.0.1/127.0.0.1:43907:43907)] 2024-11-25T10:33:19,167 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:33:19,167 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:33:19,167 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,167 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,169 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-25T10:33:19,170 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,170 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-25T10:33:19,171 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:33:19,171 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,172 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-25T10:33:19,172 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:33:19,173 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-25T10:33:19,174 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,174 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-25T10:33:19,174 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,175 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,175 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,176 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,176 DEBUG [master/e2fd862922a0:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,177 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-25T10:33:19,177 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-25T10:33:19,179 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:33:19,179 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=798802, jitterRate=0.01572979986667633}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-25T10:33:19,180 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732530799167Initializing all the Stores at 1732530799168 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530799168Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530799168Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530799168Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530799168Cleaning up temporary data from old regions at 1732530799176 (+8 ms)Region opened successfully at 1732530799180 (+4 ms) 2024-11-25T10:33:19,180 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-25T10:33:19,183 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79a44125, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:33:19,184 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-25T10:33:19,184 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-25T10:33:19,184 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-25T10:33:19,184 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-25T10:33:19,184 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-25T10:33:19,185 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-25T10:33:19,185 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-25T10:33:19,188 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-25T10:33:19,189 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-25T10:33:19,190 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-25T10:33:19,190 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-25T10:33:19,191 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-25T10:33:19,193 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-25T10:33:19,193 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-25T10:33:19,194 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-25T10:33:19,195 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-25T10:33:19,195 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-25T10:33:19,196 DEBUG 
[master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-25T10:33:19,198 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-25T10:33:19,199 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-25T10:33:19,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:33:19,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-25T10:33:19,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,202 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e2fd862922a0,44549,1732530799039, sessionid=0x1014eafec410000, setting cluster-up flag (Was=false) 2024-11-25T10:33:19,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,209 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-25T10:33:19,210 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,218 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-25T10:33:19,219 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,220 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-25T10:33:19,221 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-25T10:33:19,222 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-25T10:33:19,222 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-25T10:33:19,222 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e2fd862922a0,44549,1732530799039 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e2fd862922a0:0, corePoolSize=5, maxPoolSize=5 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e2fd862922a0:0, corePoolSize=10, maxPoolSize=10 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:33:19,223 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e2fd862922a0:0, corePoolSize=1, 
maxPoolSize=1 2024-11-25T10:33:19,225 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:33:19,225 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-25T10:33:19,225 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,226 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-25T10:33:19,226 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732530829226 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-25T10:33:19,227 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-25T10:33:19,228 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-25T10:33:19,228 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-25T10:33:19,228 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-25T10:33:19,228 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530799228,5,FailOnTimeoutGroup] 2024-11-25T10:33:19,228 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530799228,5,FailOnTimeoutGroup] 2024-11-25T10:33:19,228 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,228 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-25T10:33:19,228 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,228 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-25T10:33:19,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:33:19,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741831_1007 (size=1321) 2024-11-25T10:33:19,233 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-25T10:33:19,233 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d 2024-11-25T10:33:19,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:33:19,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741832_1008 (size=32) 2024-11-25T10:33:19,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:33:19,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:33:19,241 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:33:19,241 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,242 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:33:19,243 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:33:19,243 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:33:19,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:33:19,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,245 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:33:19,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:33:19,246 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:33:19,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740 2024-11-25T10:33:19,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740 2024-11-25T10:33:19,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:33:19,248 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:33:19,249 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-25T10:33:19,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:33:19,251 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-25T10:33:19,252 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805924, jitterRate=0.02478577196598053}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:33:19,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732530799239Initializing all the Stores at 1732530799240 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530799240Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530799240Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530799240Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530799240Cleaning up temporary data from old regions at 1732530799248 (+8 ms)Region opened successfully at 1732530799252 (+4 ms) 2024-11-25T10:33:19,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:33:19,252 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:33:19,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:33:19,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:33:19,252 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:33:19,253 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:33:19,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530799252Disabling compacts and flushes for region at 1732530799252Disabling writes for close at 1732530799252Writing region close 
event to WAL at 1732530799253 (+1 ms)Closed at 1732530799253 2024-11-25T10:33:19,254 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:33:19,254 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-25T10:33:19,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-25T10:33:19,255 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:33:19,256 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-25T10:33:19,294 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(746): ClusterId : 9835f977-1f8d-4cee-ae56-23dd579c13ed 2024-11-25T10:33:19,294 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-25T10:33:19,296 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-25T10:33:19,296 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-25T10:33:19,299 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-25T10:33:19,299 DEBUG [RS:0;e2fd862922a0:46131 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d8f5c5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e2fd862922a0/172.17.0.2:0 2024-11-25T10:33:19,311 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e2fd862922a0:46131 2024-11-25T10:33:19,311 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-25T10:33:19,311 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-25T10:33:19,311 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-25T10:33:19,312 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(2659): reportForDuty to master=e2fd862922a0,44549,1732530799039 with port=46131, startcode=1732530799081 2024-11-25T10:33:19,312 DEBUG [RS:0;e2fd862922a0:46131 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-25T10:33:19,314 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52377, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-25T10:33:19,315 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44549 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,315 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44549 {}] master.ServerManager(517): Registering regionserver=e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,316 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d 2024-11-25T10:33:19,316 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41303 2024-11-25T10:33:19,317 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-25T10:33:19,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:33:19,318 DEBUG [RS:0;e2fd862922a0:46131 {}] zookeeper.ZKUtil(111): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,318 WARN [RS:0;e2fd862922a0:46131 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-25T10:33:19,319 INFO [RS:0;e2fd862922a0:46131 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:33:19,319 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,319 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e2fd862922a0,46131,1732530799081] 2024-11-25T10:33:19,322 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-25T10:33:19,323 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-25T10:33:19,323 INFO [RS:0;e2fd862922a0:46131 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-25T10:33:19,323 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-25T10:33:19,324 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-25T10:33:19,324 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-25T10:33:19,324 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e2fd862922a0:0, corePoolSize=2, maxPoolSize=2 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e2fd862922a0:0, corePoolSize=1, maxPoolSize=1 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:33:19,325 DEBUG [RS:0;e2fd862922a0:46131 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e2fd862922a0:0, corePoolSize=3, maxPoolSize=3 2024-11-25T10:33:19,325 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-25T10:33:19,325 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,325 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,325 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,325 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,325 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,46131,1732530799081-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:33:19,340 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-25T10:33:19,340 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,46131,1732530799081-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,340 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,340 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.Replication(171): e2fd862922a0,46131,1732530799081 started 2024-11-25T10:33:19,354 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,354 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1482): Serving as e2fd862922a0,46131,1732530799081, RpcServer on e2fd862922a0/172.17.0.2:46131, sessionid=0x1014eafec410001 2024-11-25T10:33:19,354 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-25T10:33:19,354 DEBUG [RS:0;e2fd862922a0:46131 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,354 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,46131,1732530799081' 2024-11-25T10:33:19,354 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-25T10:33:19,354 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-25T10:33:19,355 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-25T10:33:19,355 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-25T10:33:19,355 DEBUG [RS:0;e2fd862922a0:46131 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,355 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e2fd862922a0,46131,1732530799081' 2024-11-25T10:33:19,355 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-25T10:33:19,355 DEBUG 
[RS:0;e2fd862922a0:46131 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-25T10:33:19,355 DEBUG [RS:0;e2fd862922a0:46131 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-25T10:33:19,355 INFO [RS:0;e2fd862922a0:46131 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-25T10:33:19,355 INFO [RS:0;e2fd862922a0:46131 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-25T10:33:19,406 WARN [e2fd862922a0:44549 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-25T10:33:19,457 INFO [RS:0;e2fd862922a0:46131 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C46131%2C1732530799081, suffix=, logDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/e2fd862922a0,46131,1732530799081, archiveDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/oldWALs, maxLogs=32 2024-11-25T10:33:19,458 INFO [RS:0;e2fd862922a0:46131 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C46131%2C1732530799081.1732530799457 2024-11-25T10:33:19,462 INFO [RS:0;e2fd862922a0:46131 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/e2fd862922a0,46131,1732530799081/e2fd862922a0%2C46131%2C1732530799081.1732530799457 2024-11-25T10:33:19,463 DEBUG [RS:0;e2fd862922a0:46131 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45723:45723),(127.0.0.1/127.0.0.1:43907:43907)] 2024-11-25T10:33:19,656 DEBUG [e2fd862922a0:44549 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-25T10:33:19,657 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,658 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,46131,1732530799081, state=OPENING 2024-11-25T10:33:19,659 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-25T10:33:19,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,661 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-25T10:33:19,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:33:19,661 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:33:19,661 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,46131,1732530799081}] 2024-11-25T10:33:19,665 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.1732530614468 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:19,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35207,1732530614007/e2fd862922a0%2C35207%2C1732530614007.meta.1732530614659.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:19,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46735/user/jenkins/test-data/0a56d3b8-9229-dc16-a5e3-be8ec5eced56/WALs/e2fd862922a0,35123,1732530614788/e2fd862922a0%2C35123%2C1732530614788.1732530614982 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-25T10:33:19,814 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-25T10:33:19,815 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37459, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-25T10:33:19,819 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-25T10:33:19,819 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:33:19,820 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e2fd862922a0%2C46131%2C1732530799081.meta, suffix=.meta, logDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/e2fd862922a0,46131,1732530799081, archiveDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/oldWALs, maxLogs=32 2024-11-25T10:33:19,821 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e2fd862922a0%2C46131%2C1732530799081.meta.1732530799820.meta 2024-11-25T10:33:19,826 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/e2fd862922a0,46131,1732530799081/e2fd862922a0%2C46131%2C1732530799081.meta.1732530799820.meta 2024-11-25T10:33:19,832 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43907:43907),(127.0.0.1/127.0.0.1:45723:45723)] 2024-11-25T10:33:19,836 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-25T10:33:19,837 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-25T10:33:19,837 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-25T10:33:19,837 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-25T10:33:19,837 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-25T10:33:19,837 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-25T10:33:19,837 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-25T10:33:19,837 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-25T10:33:19,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-25T10:33:19,839 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-25T10:33:19,839 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-25T10:33:19,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-25T10:33:19,840 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-25T10:33:19,841 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-25T10:33:19,841 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-25T10:33:19,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-25T10:33:19,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-25T10:33:19,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-25T10:33:19,842 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-25T10:33:19,842 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-25T10:33:19,843 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740 2024-11-25T10:33:19,844 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740 2024-11-25T10:33:19,845 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-25T10:33:19,845 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-25T10:33:19,845 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-25T10:33:19,846 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-25T10:33:19,847 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795285, jitterRate=0.011258065700531006}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-25T10:33:19,847 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-25T10:33:19,848 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732530799837Writing region info on filesystem at 1732530799837Initializing all the Stores at 1732530799838 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530799838Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530799838Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732530799838Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732530799838Cleaning up temporary data from old regions at 1732530799845 (+7 ms)Running coprocessor post-open hooks at 1732530799847 (+2 ms)Region opened successfully at 1732530799847 2024-11-25T10:33:19,849 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732530799813 2024-11-25T10:33:19,850 DEBUG [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-25T10:33:19,851 INFO [RS_OPEN_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-25T10:33:19,851 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,852 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e2fd862922a0,46131,1732530799081, state=OPEN 2024-11-25T10:33:19,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:33:19,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-25T10:33:19,857 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:33:19,857 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,857 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-25T10:33:19,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-25T10:33:19,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e2fd862922a0,46131,1732530799081 in 196 msec 2024-11-25T10:33:19,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-25T10:33:19,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-25T10:33:19,862 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-25T10:33:19,862 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-25T10:33:19,863 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:33:19,863 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,46131,1732530799081, seqNum=-1] 2024-11-25T10:33:19,863 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:33:19,864 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44261, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:33:19,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 647 msec 2024-11-25T10:33:19,869 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732530799869, completionTime=-1 2024-11-25T10:33:19,869 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-25T10:33:19,869 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732530859871 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732530919871 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44549,1732530799039-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44549,1732530799039-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44549,1732530799039-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e2fd862922a0:44549, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,871 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-25T10:33:19,873 DEBUG [master/e2fd862922a0:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.765sec 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44549,1732530799039-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-25T10:33:19,875 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44549,1732530799039-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-25T10:33:19,877 DEBUG [master/e2fd862922a0:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-25T10:33:19,877 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-25T10:33:19,877 INFO [master/e2fd862922a0:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e2fd862922a0,44549,1732530799039-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-25T10:33:19,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22ed154c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:33:19,894 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e2fd862922a0,44549,-1 for getting cluster id 2024-11-25T10:33:19,895 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-25T10:33:19,896 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9835f977-1f8d-4cee-ae56-23dd579c13ed' 2024-11-25T10:33:19,896 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-25T10:33:19,896 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9835f977-1f8d-4cee-ae56-23dd579c13ed" 2024-11-25T10:33:19,897 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45806f01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:33:19,897 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e2fd862922a0,44549,-1] 2024-11-25T10:33:19,897 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-25T10:33:19,897 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:19,898 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43046, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-25T10:33:19,899 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c9b811e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-25T10:33:19,899 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-25T10:33:19,900 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e2fd862922a0,46131,1732530799081, seqNum=-1] 2024-11-25T10:33:19,900 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-25T10:33:19,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51410, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-25T10:33:19,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-25T10:33:19,905 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-25T10:33:19,905 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-25T10:33:19,907 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/test.com,8080,1, archiveDir=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/oldWALs, maxLogs=32 2024-11-25T10:33:19,907 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732530799907 2024-11-25T10:33:19,912 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/test.com,8080,1/test.com%2C8080%2C1.1732530799907 2024-11-25T10:33:19,913 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43907:43907),(127.0.0.1/127.0.0.1:45723:45723)] 2024-11-25T10:33:19,914 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732530799913 2024-11-25T10:33:19,918 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,918 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,918 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,918 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,918 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,918 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/test.com,8080,1/test.com%2C8080%2C1.1732530799907 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/test.com,8080,1/test.com%2C8080%2C1.1732530799913 2024-11-25T10:33:19,919 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43907:43907),(127.0.0.1/127.0.0.1:45723:45723)] 2024-11-25T10:33:19,919 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/test.com,8080,1/test.com%2C8080%2C1.1732530799907 is not closed yet, will try archiving it next time 2024-11-25T10:33:19,920 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,920 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,920 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741835_1011 (size=93) 2024-11-25T10:33:19,920 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,920 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:19,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741835_1011 (size=93) 2024-11-25T10:33:19,921 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/WALs/test.com,8080,1/test.com%2C8080%2C1.1732530799907 to hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/oldWALs/test.com%2C8080%2C1.1732530799907 2024-11-25T10:33:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741836_1012 (size=93) 2024-11-25T10:33:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741836_1012 (size=93) 2024-11-25T10:33:19,924 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/oldWALs 2024-11-25T10:33:19,924 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732530799913) 2024-11-25T10:33:19,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-25T10:33:19,924 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-25T10:33:19,924 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:33:19,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:19,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:19,925 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-25T10:33:19,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-25T10:33:19,925 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=833038088, stopped=false 2024-11-25T10:33:19,925 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e2fd862922a0,44549,1732530799039 2024-11-25T10:33:19,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:33:19,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-25T10:33:19,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:19,926 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:33:19,926 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-25T10:33:19,926 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:33:19,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:19,927 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e2fd862922a0,46131,1732530799081' ***** 2024-11-25T10:33:19,927 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-25T10:33:19,927 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:33:19,927 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-25T10:33:19,927 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-25T10:33:19,927 INFO [RS:0;e2fd862922a0:46131 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-25T10:33:19,927 INFO [RS:0;e2fd862922a0:46131 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-25T10:33:19,927 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(959): stopping server e2fd862922a0,46131,1732530799081 2024-11-25T10:33:19,927 INFO [RS:0;e2fd862922a0:46131 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:33:19,927 INFO [RS:0;e2fd862922a0:46131 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e2fd862922a0:46131. 2024-11-25T10:33:19,927 DEBUG [RS:0;e2fd862922a0:46131 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-25T10:33:19,927 DEBUG [RS:0;e2fd862922a0:46131 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:19,927 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-25T10:33:19,928 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-25T10:33:19,928 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-25T10:33:19,928 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-25T10:33:19,928 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-25T10:33:19,928 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-25T10:33:19,928 DEBUG [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-25T10:33:19,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-25T10:33:19,928 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-25T10:33:19,928 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-25T10:33:19,928 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-25T10:33:19,928 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-25T10:33:19,928 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-25T10:33:19,928 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-25T10:33:19,944 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740/.tmp/ns/225ef551920e4a5bbf9530c504f4908b is 43, key is default/ns:d/1732530799865/Put/seqid=0 2024-11-25T10:33:19,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741837_1013 (size=5153) 2024-11-25T10:33:19,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741837_1013 (size=5153) 2024-11-25T10:33:19,949 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740/.tmp/ns/225ef551920e4a5bbf9530c504f4908b 2024-11-25T10:33:19,953 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740/.tmp/ns/225ef551920e4a5bbf9530c504f4908b as hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740/ns/225ef551920e4a5bbf9530c504f4908b 2024-11-25T10:33:19,958 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): 
Added hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740/ns/225ef551920e4a5bbf9530c504f4908b, entries=2, sequenceid=6, filesize=5.0 K 2024-11-25T10:33:19,959 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-25T10:33:19,959 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-25T10:33:19,963 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-25T10:33:19,963 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-25T10:33:19,963 INFO [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-25T10:33:19,963 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732530799928Running coprocessor pre-close hooks at 1732530799928Disabling compacts and flushes for region at 1732530799928Disabling writes for close at 1732530799928Obtaining lock to block concurrent updates at 1732530799928Preparing flush snapshotting stores in 1588230740 at 1732530799928Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732530799929 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732530799930 (+1 ms)Flushing 1588230740/ns: creating writer at 1732530799930Flushing 1588230740/ns: appending metadata at 1732530799944 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732530799944Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c1e249: reopening flushed file at 1732530799953 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1732530799959 (+6 ms)Writing region close event to WAL at 1732530799960 (+1 ms)Running coprocessor post-close hooks at 1732530799963 (+3 ms)Closed at 1732530799963 2024-11-25T10:33:19,963 DEBUG [RS_CLOSE_META-regionserver/e2fd862922a0:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-25T10:33:20,128 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(976): stopping server e2fd862922a0,46131,1732530799081; all regions closed. 
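The "Region close journal" entry above packs every close step and its timestamp into one line, with "(+N ms)" deltas only where time actually advanced. When a close (or the analogous "Region open journal" earlier) looks slow, it helps to split that line back into per-step timings; the JDK-only helper below does that, assuming each journal is emitted on a single log line as it is here.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class RegionJournalTimings {
      // Each step reads "<description> at <epoch millis>", optionally followed by " (+N ms)".
      private static final Pattern STEP =
          Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

      public static void main(String[] args) throws Exception {
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        String line;
        while ((line = in.readLine()) != null) {
          int journal = line.indexOf("journal for ");
          if (journal < 0) continue;
          int colon = line.indexOf(": ", journal);
          if (colon < 0) continue;
          Matcher m = STEP.matcher(line.substring(colon + 2));
          long prev = -1;
          while (m.find()) {
            long ts = Long.parseLong(m.group(2));
            System.out.printf("%5d ms  %s%n", prev < 0 ? 0 : ts - prev, m.group(1).trim());
            prev = ts;
          }
        }
      }
    }

Feeding it the close journal above singles out the two largest steps: 14 ms appending the ns flush metadata and 9 ms reopening the flushed file.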
2024-11-25T10:33:20,129 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,129 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,129 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,129 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,129 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741834_1010 (size=1152) 2024-11-25T10:33:20,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741834_1010 (size=1152) 2024-11-25T10:33:20,134 DEBUG [RS:0;e2fd862922a0:46131 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/oldWALs 2024-11-25T10:33:20,134 INFO [RS:0;e2fd862922a0:46131 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C46131%2C1732530799081.meta:.meta(num 1732530799820) 2024-11-25T10:33:20,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,134 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,134 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,134 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,134 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-25T10:33:20,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741833_1009 (size=93) 2024-11-25T10:33:20,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741833_1009 (size=93) 2024-11-25T10:33:20,138 DEBUG [RS:0;e2fd862922a0:46131 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/oldWALs 2024-11-25T10:33:20,138 INFO [RS:0;e2fd862922a0:46131 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e2fd862922a0%2C46131%2C1732530799081:(num 1732530799457) 2024-11-25T10:33:20,138 DEBUG [RS:0;e2fd862922a0:46131 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-25T10:33:20,138 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.LeaseManager(133): Closed leases 2024-11-25T10:33:20,139 INFO [RS:0;e2fd862922a0:46131 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:33:20,139 INFO [RS:0;e2fd862922a0:46131 {}] hbase.ChoreService(370): Chore service for: regionserver/e2fd862922a0:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-25T10:33:20,139 INFO [RS:0;e2fd862922a0:46131 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:33:20,139 INFO [regionserver/e2fd862922a0:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-25T10:33:20,139 INFO [RS:0;e2fd862922a0:46131 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46131 2024-11-25T10:33:20,141 INFO [RS:0;e2fd862922a0:46131 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-25T10:33:20,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e2fd862922a0,46131,1732530799081 2024-11-25T10:33:20,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-25T10:33:20,142 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e2fd862922a0,46131,1732530799081] 2024-11-25T10:33:20,143 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e2fd862922a0,46131,1732530799081 already deleted, retry=false 2024-11-25T10:33:20,143 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e2fd862922a0,46131,1732530799081 expired; onlineServers=0 2024-11-25T10:33:20,143 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e2fd862922a0,44549,1732530799039' ***** 2024-11-25T10:33:20,143 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-25T10:33:20,144 INFO [M:0;e2fd862922a0:44549 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-25T10:33:20,144 INFO [M:0;e2fd862922a0:44549 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-25T10:33:20,144 DEBUG [M:0;e2fd862922a0:44549 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-25T10:33:20,144 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
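The ZKWatcher entries above are the mechanism behind the expiration: the region server's ephemeral znode under /hbase/rs disappears when its ZooKeeper session closes, the master's watcher receives NodeDeleted and NodeChildrenChanged events, and RegionServerTracker(171) processes the expiration. A bare-bones illustration of that watch pattern with the plain ZooKeeper client is sketched below; the quorum address is the one in this log, while the session timeout and the single-shot watches (a real tracker re-registers after every event) are simplifications.

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RsZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Quorum taken from the log; the 30s session timeout is an arbitrary choice for this sketch.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64684", 30_000, event -> { });
        Watcher rsWatcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            // Matches the event pair logged above: NodeDeleted on /hbase/rs/<server>,
            // then NodeChildrenChanged on /hbase/rs.
            System.out.println(event.getType() + " on " + event.getPath());
          }
        };
        List<String> liveServers = zk.getChildren("/hbase/rs", rsWatcher); // NodeChildrenChanged watch
        for (String server : liveServers) {
          zk.exists("/hbase/rs/" + server, rsWatcher); // NodeDeleted watch on each ephemeral znode
        }
        System.out.println("live region servers: " + liveServers);
        Thread.sleep(60_000); // keep the session alive long enough to observe a deletion
        zk.close();
      }
    }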
2024-11-25T10:33:20,144 DEBUG [M:0;e2fd862922a0:44549 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-25T10:33:20,144 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530799228 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.large.0-1732530799228,5,FailOnTimeoutGroup] 2024-11-25T10:33:20,144 DEBUG [master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530799228 {}] cleaner.HFileCleaner(306): Exit Thread[master/e2fd862922a0:0:becomeActiveMaster-HFileCleaner.small.0-1732530799228,5,FailOnTimeoutGroup] 2024-11-25T10:33:20,144 INFO [M:0;e2fd862922a0:44549 {}] hbase.ChoreService(370): Chore service for: master/e2fd862922a0:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-25T10:33:20,144 INFO [M:0;e2fd862922a0:44549 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-25T10:33:20,144 DEBUG [M:0;e2fd862922a0:44549 {}] master.HMaster(1795): Stopping service threads 2024-11-25T10:33:20,144 INFO [M:0;e2fd862922a0:44549 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-25T10:33:20,144 INFO [M:0;e2fd862922a0:44549 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-25T10:33:20,144 INFO [M:0;e2fd862922a0:44549 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-25T10:33:20,144 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-25T10:33:20,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-25T10:33:20,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-25T10:33:20,145 DEBUG [M:0;e2fd862922a0:44549 {}] zookeeper.ZKUtil(347): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-25T10:33:20,145 WARN [M:0;e2fd862922a0:44549 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-25T10:33:20,146 INFO [M:0;e2fd862922a0:44549 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/.lastflushedseqids 2024-11-25T10:33:20,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741838_1014 (size=99) 2024-11-25T10:33:20,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741838_1014 (size=99) 2024-11-25T10:33:20,154 INFO [M:0;e2fd862922a0:44549 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-25T10:33:20,154 INFO [M:0;e2fd862922a0:44549 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-25T10:33:20,154 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-25T10:33:20,154 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:20,154 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:20,154 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-25T10:33:20,154 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-25T10:33:20,154 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-25T10:33:20,170 DEBUG [M:0;e2fd862922a0:44549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9b12b0d6d014350805456e352ce7601 is 82, key is hbase:meta,,1/info:regioninfo/1732530799851/Put/seqid=0 2024-11-25T10:33:20,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741839_1015 (size=5672) 2024-11-25T10:33:20,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741839_1015 (size=5672) 2024-11-25T10:33:20,175 INFO [M:0;e2fd862922a0:44549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9b12b0d6d014350805456e352ce7601 2024-11-25T10:33:20,192 DEBUG [M:0;e2fd862922a0:44549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d262dbc74e564362ae365ea97324d45b is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732530799868/Put/seqid=0 2024-11-25T10:33:20,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741840_1016 (size=5275) 2024-11-25T10:33:20,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741840_1016 (size=5275) 2024-11-25T10:33:20,198 INFO [M:0;e2fd862922a0:44549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d262dbc74e564362ae365ea97324d45b 2024-11-25T10:33:20,215 DEBUG [M:0;e2fd862922a0:44549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b98102813f664241be899ea10ba7db4d is 69, key is e2fd862922a0,46131,1732530799081/rs:state/1732530799315/Put/seqid=0 2024-11-25T10:33:20,219 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741841_1017 (size=5156)
2024-11-25T10:33:20,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741841_1017 (size=5156)
2024-11-25T10:33:20,220 INFO [M:0;e2fd862922a0:44549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b98102813f664241be899ea10ba7db4d
2024-11-25T10:33:20,237 DEBUG [M:0;e2fd862922a0:44549 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d5867cd4766d415eb908b46eb9adf341 is 52, key is load_balancer_on/state:d/1732530799904/Put/seqid=0
2024-11-25T10:33:20,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741842_1018 (size=5056)
2024-11-25T10:33:20,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741842_1018 (size=5056)
2024-11-25T10:33:20,242 INFO [M:0;e2fd862922a0:44549 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d5867cd4766d415eb908b46eb9adf341
2024-11-25T10:33:20,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T10:33:20,242 INFO [RS:0;e2fd862922a0:46131 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-25T10:33:20,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46131-0x1014eafec410001, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T10:33:20,242 INFO [RS:0;e2fd862922a0:46131 {}] regionserver.HRegionServer(1031): Exiting; stopping=e2fd862922a0,46131,1732530799081; zookeeper connection closed.
2024-11-25T10:33:20,242 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@538b84db {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@538b84db
2024-11-25T10:33:20,242 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-25T10:33:20,247 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9b12b0d6d014350805456e352ce7601 as hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a9b12b0d6d014350805456e352ce7601
2024-11-25T10:33:20,251 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a9b12b0d6d014350805456e352ce7601, entries=8, sequenceid=29, filesize=5.5 K
2024-11-25T10:33:20,252 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d262dbc74e564362ae365ea97324d45b as hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d262dbc74e564362ae365ea97324d45b
2024-11-25T10:33:20,256 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d262dbc74e564362ae365ea97324d45b, entries=3, sequenceid=29, filesize=5.2 K
2024-11-25T10:33:20,256 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b98102813f664241be899ea10ba7db4d as hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b98102813f664241be899ea10ba7db4d
2024-11-25T10:33:20,260 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b98102813f664241be899ea10ba7db4d, entries=1, sequenceid=29, filesize=5.0 K
2024-11-25T10:33:20,261 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d5867cd4766d415eb908b46eb9adf341 as hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d5867cd4766d415eb908b46eb9adf341
2024-11-25T10:33:20,264 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41303/user/jenkins/test-data/eb6674d7-8aac-f800-c23b-5811f4b1a91d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d5867cd4766d415eb908b46eb9adf341, entries=1, sequenceid=29, filesize=4.9 K
2024-11-25T10:33:20,265 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false
2024-11-25T10:33:20,266 INFO [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-25T10:33:20,267 DEBUG [M:0;e2fd862922a0:44549 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1732530800154
    Disabling compacts and flushes for region at 1732530800154
    Disabling writes for close at 1732530800154
    Obtaining lock to block concurrent updates at 1732530800154
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732530800154
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732530800155 (+1 ms)
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732530800155
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732530800156 (+1 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732530800170 (+14 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732530800170
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732530800179 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732530800192 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732530800192
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732530800201 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732530800214 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732530800214
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732530800223 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732530800237 (+14 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732530800237
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f1d6c08: reopening flushed file at 1732530800246 (+9 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f3d8828: reopening flushed file at 1732530800251 (+5 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39f19d14: reopening flushed file at 1732530800256 (+5 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cbe06d5: reopening flushed file at 1732530800260 (+4 ms)
    Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=29, compaction requested=false at 1732530800265 (+5 ms)
    Writing region close event to WAL at 1732530800266 (+1 ms)
    Closed at 1732530800266
2024-11-25T10:33:20,267 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:33:20,267 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:33:20,267 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:33:20,267 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:33:20,267 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-25T10:33:20,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43409 is added to blk_1073741830_1006 (size=10311)
2024-11-25T10:33:20,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39441 is added to blk_1073741830_1006 (size=10311)
2024-11-25T10:33:20,270 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-25T10:33:20,270 INFO [M:0;e2fd862922a0:44549 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-25T10:33:20,270 INFO [M:0;e2fd862922a0:44549 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44549
2024-11-25T10:33:20,270 INFO [M:0;e2fd862922a0:44549 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-25T10:33:20,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T10:33:20,372 INFO [M:0;e2fd862922a0:44549 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-25T10:33:20,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44549-0x1014eafec410000, quorum=127.0.0.1:64684, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-25T10:33:20,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45e68726{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T10:33:20,374 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e1cda33{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T10:33:20,374 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T10:33:20,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@352a3917{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T10:33:20,374 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@536a5c19{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.log.dir/,STOPPED}
2024-11-25T10:33:20,376 WARN [BP-1149931814-172.17.0.2-1732530798283 heartbeating to localhost/127.0.0.1:41303 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-25T10:33:20,376 WARN [BP-1149931814-172.17.0.2-1732530798283 heartbeating to localhost/127.0.0.1:41303 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1149931814-172.17.0.2-1732530798283 (Datanode Uuid dac5dcaf-2be4-45a1-b4f7-89d3f3f285cf) service to localhost/127.0.0.1:41303
2024-11-25T10:33:20,376 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-25T10:33:20,376 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-25T10:33:20,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data3/current/BP-1149931814-172.17.0.2-1732530798283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T10:33:20,377 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data4/current/BP-1149931814-172.17.0.2-1732530798283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T10:33:20,377 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-25T10:33:20,378 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b825638{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-25T10:33:20,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fcb3743{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T10:33:20,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T10:33:20,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bfccd5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T10:33:20,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35a6030c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.log.dir/,STOPPED}
2024-11-25T10:33:20,380 WARN [BP-1149931814-172.17.0.2-1732530798283 heartbeating to localhost/127.0.0.1:41303 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-25T10:33:20,380 WARN [BP-1149931814-172.17.0.2-1732530798283 heartbeating to localhost/127.0.0.1:41303 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1149931814-172.17.0.2-1732530798283 (Datanode Uuid f6bac507-d39b-407a-98d5-d3eb389b7d89) service to localhost/127.0.0.1:41303
2024-11-25T10:33:20,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data1/current/BP-1149931814-172.17.0.2-1732530798283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T10:33:20,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/cluster_408c46ca-9ca1-563e-4815-f028d18605ec/data/data2/current/BP-1149931814-172.17.0.2-1732530798283 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-25T10:33:20,381 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-25T10:33:20,381 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-25T10:33:20,381 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-25T10:33:20,386 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3637fc70{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-25T10:33:20,387 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6dda9ea2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-25T10:33:20,387 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-25T10:33:20,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3287f588{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-25T10:33:20,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@442ec576{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/29794438-e19c-a5cd-8137-1084ca3c2669/hadoop.log.dir/,STOPPED}
2024-11-25T10:33:20,393 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-25T10:33:20,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-25T10:33:20,415 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=271 (was 233)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41303
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41303 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: globalEventExecutor-1-21
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
    app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41303
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:41303
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:41303
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41303 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41303 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41303
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=537 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=84 (was 83) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1482 (was 1491)